-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathscrape.py
More file actions
56 lines (42 loc) · 2.21 KB
/
scrape.py
File metadata and controls
56 lines (42 loc) · 2.21 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
from urllib.parse import urljoin
from bs4 import BeautifulSoup, SoupStrainer
import requests_cache
from logging import basicConfig, getLogger, INFO
# Log to app.log, truncating on each run (filemode="w"); INFO level and up.
basicConfig(level=INFO, filename="app.log", filemode="w")
logger = getLogger(__name__)
# Start page of the crawl: the site's publications index.
url = "https://www.energyeconomicgrowth.org/www.energyeconomicgrowth.org/content/publications.html"
# Shared mutable crawl state, appended to by get_publication_links:
publication_links = []   # direct links to individual publications (the output)
unexplored_links = []    # index pages discovered but not yet crawled (FIFO queue)
explored_links = []      # index pages already crawled, to avoid revisiting
# HTTP session with a 30-second response cache to avoid re-fetching pages.
session = requests_cache.CachedSession("cache", expire_after=30)
# Parse only <a href=...> tags -- the crawler needs nothing else from the HTML.
strainer = SoupStrainer("a", href=True)
def get_publication_links(url):
    """Scrape *url* for publication links, accumulating into module state.

    Every ``<a href="...html">`` whose path contains "/publication/" is
    appended to the shared ``publication_links`` list (skipping exact
    duplicates); any other publication-related page is queued in
    ``unexplored_links`` for the caller to crawl later.

    Returns the shared, cumulative ``publication_links`` list.
    """
    # Lazy %-style arguments: the message is only built if INFO is enabled.
    logger.info("Unexplored links: %s.", unexplored_links)
    logger.info("Explored links: %s.", explored_links)
    response = session.get(url)
    # parse_only=strainer restricts parsing to <a href=...> tags for speed.
    soup = BeautifulSoup(response.text, "html.parser", parse_only=strainer)
    for link in soup.select("a[href$='.html']"):
        link_to_check = str(link['href'])
        # Keep only publication-related links, excluding the index page itself.
        if "publication" in link_to_check and "publications.html" not in link_to_check:
            if "/publication/" in link_to_check and link_to_check not in publication_links:
                publication_links.append(link_to_check)
                logger.info("Found publication link: %s", link_to_check)
            # NOTE(review): `link_to_check not in url` is a *substring* test,
            # not equality -- presumably meant to skip the current page;
            # confirm this is intentional.
            elif link_to_check not in explored_links and link_to_check not in unexplored_links and link_to_check not in url:
                logger.info("Found new publication page: %s", link_to_check)
                unexplored_links.append(link_to_check)
    return publication_links
# Crawl the start page, then every publication index page it discovers,
# accumulating links in the shared publication_links list.
publications = get_publication_links(url)
logger.info("Found %d publications.", len(publications))
while unexplored_links:
    # FIFO queue: crawl the oldest undiscovered index page next.
    next_link = unexplored_links.pop(0)
    explored_links.append(next_link)
    # Discovered hrefs may be relative, so resolve them against the start URL.
    publications = get_publication_links(urljoin(url, next_link))
    logger.info("Found %d publications.", len(publications))
# A duplicate here means the de-duplication guard in get_publication_links failed.
if len(set(publications)) != len(publications):
    print(f"{len(publications) - len(set(publications))} duplicate publications found.")
    raise ValueError("Duplicate publications found.")
# Write once, after the crawl completes. The original wrote the *cumulative*
# list inside the loop on every iteration, so the CSV repeated all earlier
# links after each page and fused consecutive writes with no separator.
with open('publication_links.csv', 'w') as f:
    f.write("\n".join(publications))