qotnews/apiserver/scrapers/headless.py


import logging
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.DEBUG)
import requests
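
# Endpoints of the local headless-browser scraper service.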
READ_API = 'http://127.0.0.1:33843/headless/details'
READ_COMMENT_API = 'http://127.0.0.1:33843/headless/comments'
TIMEOUT = 60

def get_html(url):
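    """Return the scraped article content for *url*, or '' on failure."""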
logging.info(f"Headless Browser Scraper: {url}")
    details = get_details(url)
    if not details:
        return ''
    return details['content']

def get_details(url):
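    """POST *url* to the headless details endpoint and return the parsed JSON, or None on failure."""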
    try:
        r = requests.post(READ_API, data=dict(url=url), timeout=TIMEOUT)
        if r.status_code != 200:
            raise Exception('Bad response code ' + str(r.status_code))
        return r.json()
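    # Let Ctrl-C propagate; any other failure is logged and reported as None.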
    except KeyboardInterrupt:
        raise
    except BaseException as e:
        logging.error('Problem scraping article: {}'.format(str(e)))
        return None

def get_comments(url):
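    """POST *url* to the headless comments endpoint and return the parsed JSON, or None on failure."""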
    try:
        r = requests.post(READ_COMMENT_API, data=dict(url=url), timeout=TIMEOUT)
        if r.status_code != 200:
            raise Exception('Bad response code ' + str(r.status_code))
        return r.json()
    except KeyboardInterrupt:
        raise
    except BaseException as e:
        logging.error('Problem getting comments for article: {}'.format(str(e)))
        return None
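
if __name__ == '__main__':
    # Not part of the original module: a quick manual check, assuming the
    # headless scraper service is listening on 127.0.0.1:33843. The URL below
    # is a placeholder; swap in a real article link to test.
    test_url = 'https://example.com/article'
    print((get_html(test_url) or '')[:500])
    print(get_comments(test_url))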