# **********************************************************************************************
# This code scans the EXOMOL website, and lists all available molecules and linelists.
# It writes to files: "Exomol_species.dat" and "Exomol_xsec_species.dat", and lists the
# corresponding file names
#
# Date: May 2019
# Author: Simon Grimm
#
# *********************************************************************************************
10 | |
---|
11 | from bs4 import BeautifulSoup |
---|
12 | import requests |
---|
13 | import sys |
---|
14 | |
---|
def transitionRanges(url):
    """Scan an ExoMol line-list page and inspect the ranges of its .trans files.

    Parameters
    ----------
    url : str
        URL of a line-list directory page, e.g.
        "http://exomol.com/data/molecules/H2O/1H2-16O/BT2/".

    Returns
    -------
    tuple of (s, n, dg)
        s  : common width (end - start) of the wavenumber ranges of the
             .trans files; -1 when the ranges are not all equal; 0 when
             there is at most one .trans file or a file name fails to parse.
        n  : number of transition files considered (1 when <= 1 file).
        dg : number of digits of the lower range boundary (0 when <= 1 file
             or on parse failure).
    """
    page = requests.get(url).text
    soup = BeautifulSoup(page, "html.parser")
    items = soup.find_all('li', attrs={"class": "list-group-item link-list-group-item"})

    # Collect the "<start>-<end>" range part of every *.trans file name.
    transList = []
    for item in items:
        href = item.a.get('href')
        tail = href.split('__')[-1]        # part after the last '__'
        pieces = tail.split('.trans')
        if len(pieces) > 1:                # keep only actual .trans files
            transList.append(pieces[0])

    rangesList = []
    if len(transList) > 1:
        # Compute the width (end - start) of every range.
        for x in transList:
            try:
                lo = x.split('-')[0]
                hi = x.split('-')[1]
                dg = len(lo)               # digit count of the lower bound
                rangesList.append(float(hi) - float(lo))
            except (ValueError, IndexError):
                # Unexpected file-name format: report it and give up on
                # this line list instead of crashing the whole scan.
                print("error", url, x)
                return (0, 0, 0)

        # All ranges must be equal; flag inconsistent lists with -1.
        s = rangesList[0]
        n = len(rangesList)
        for r in rangesList:
            if r != s:
                s = -1
    else:
        s = 0
        n = 1
        dg = 0
    return (s, n, dg)
67 | |
---|
68 | |
---|
def main():
    """Scan the ExoMol website and list all molecules, line lists and files.

    Writes two output files (line-buffered so they are readable mid-scan):
      "Exomol_species.dat"      -- one row per line list: molecule,
                                   isotopologue, list name, path, and the
                                   .trans range info from transitionRanges().
      "Exomol_xsec_species.dat" -- one row per cross-section ("xsec-") set.

    Exits with status 100 when no species links are found, which usually
    means the ExoMol page layout has changed.
    """
    print("Scan Exomol website for file names")  # typo "webiste" fixed

    # Scan the atoms page first, then the molecules page; both use the
    # same link class on the ExoMol site.
    url = "http://exomol.com/data/atoms/"
    page = requests.get(url).text
    soup = BeautifulSoup(page, "html.parser")
    species = soup.find_all('a', attrs={"class": "list-group-item link-list-group-item molecule_link"})

    url = "http://exomol.com/data/molecules/"
    page = requests.get(url).text
    soup = BeautifulSoup(page, "html.parser")
    species += soup.find_all('a', attrs={"class": "list-group-item link-list-group-item molecule_link"})

    if not species:
        print("Error, no molecules found, maybe the Exomol homepage has changed")
        sys.exit(100)

    # 'with' guarantees both output files are closed; the original never
    # closed exfile. buffering=1 keeps output line-buffered.
    with open("Exomol_species.dat", "w", buffering=1) as efile, \
         open("Exomol_xsec_species.dat", "w", buffering=1) as exfile:

        # Molecule
        for entry in species:
            parts = entry.get('href').split('data/molecules/')
            if len(parts) < 2:
                # NOTE(review): atom links live under data/atoms/ and would
                # have raised IndexError on the original unconditional
                # split(...)[1]; skip them here -- confirm desired handling.
                continue
            el = parts[1]
            print(el)

            url1 = url + el + "/"
            page1 = requests.get(url1).text
            soup1 = BeautifulSoup(page1, "html.parser")
            isotopologues = soup1.find_all('a', attrs={"class": "list-group-item link-list-group-item"})

            # Isotopologue
            for iso in isotopologues:
                el1 = iso.get('href')
                print("  ", el1)

                url2 = url1 + el1 + "/"
                page2 = requests.get(url2).text
                soup2 = BeautifulSoup(page2, "html.parser")

                # The site uses several class variants for line-list links.
                linelists = soup2.find_all('a', attrs={"class": "list-group-item link-list-group-item "})
                linelists += soup2.find_all('a', attrs={"class": "list-group-item link-list-group-item"})
                linelists += soup2.find_all('a', attrs={"class": "list-group-item link-list-group-item recommended"})

                # Line list
                for lst in linelists:
                    el2 = lst.get('href')
                    # change HITEMP to HITEMP2010, but only in the prints
                    el3 = el2.replace("HITEMP", "HITEMP2010")

                    if el2.find("xsec-") >= 0:
                        # Cross-section data set -> Exomol_xsec_species.dat
                        name = el2.split("xsec-")[1]
                        p3 = el1 + "__" + name
                        print("%-16s %-24s %-32s %-32s" % (el, el1, el3, p3), file=exfile)
                    else:
                        print("  ", el1 + "__" + el3, el + "/" + el1 + "/" + el2 )

                        p2 = el1 + "__" + el3
                        p3 = el + "/" + el1 + "/" + el2

                        url3 = url2 + el2
                        s, n, dg = transitionRanges(url3)
                        print(s, n, dg)
                        print("%-16s %-24s %-32s %-40s %8g %8g %8g" % (el, el1, p2, p3, s, n, dg), file=efile)

    print("Scan complete")
148 | |
---|
# Script entry point: run the scan only when executed directly.
if __name__ == "__main__":
    main()