qotnews/apiserver/feeds/news.py

import logging
logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=logging.DEBUG)

if __name__ == '__main__':
    import sys
    sys.path.insert(0, '.')

import requests
from datetime import datetime
from bs4 import BeautifulSoup
from scrapers import declutter
import dateutil.parser
import extruct
import pytz

from utils import clean
import settings

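# dateutil doesn't resolve NZ timezone abbreviations on its own,
# so map them explicitly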
tzinfos = {
    'NZDT': pytz.timezone('Pacific/Auckland'),
    'NZST': pytz.timezone('Pacific/Auckland')
}

USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0'
#USER_AGENT = "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
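
# parse a date string into a unix timestamp; naive datetimes are
# localized to the given timezone, and any failure returns 0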
def unix(date_str, tz=None):
    try:
        dt = dateutil.parser.parse(date_str, tzinfos=tzinfos)
        if tz:
            dt = pytz.timezone(tz).localize(dt)
        return int(dt.timestamp())
    except:
        pass
    return 0
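
# fetch a URL and return the response text, or False on failure;
# the spoofed X-Forwarded-For (a Googlebot address) helps get
# past some bot and geo blocks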
def xml(route, ref=None):
    try:
        headers = {'User-Agent': USER_AGENT, 'X-Forwarded-For': '66.249.66.1'}
        r = requests.get(route(ref), headers=headers, timeout=5)
        if r.status_code != 200:
            raise Exception('Bad response code ' + str(r.status_code))
        return r.text
    except KeyboardInterrupt:
        raise
    except BaseException as e:
        logging.error('Problem hitting URL: {}'.format(str(e)))
        return False
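
# fill the story dict s with title, date and author from extruct's
# structured-data output (RDFa, OpenGraph, microdata, JSON-LD)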
def parse_extruct(s, data):
    rdfa_keys = {
        'title': [
            'http://ogp.me/ns#title',
            'https://ogp.me/ns#title',
        ],
        'date': [
            'http://ogp.me/ns/article#modified_time',
            'https://ogp.me/ns/article#modified_time',
            'http://ogp.me/ns/article#published_time',
            'https://ogp.me/ns/article#published_time',
        ]
    }
    for rdfa in data['rdfa']:
        for key, props in rdfa.items():
            for attribute, properties in rdfa_keys.items():
                for prop in properties:
                    if prop in props:
                        for values in props[prop]:
                            s[attribute] = values['@value']

    for og in data['opengraph']:
        titles = list(filter(None, [value if 'og:title' in key else None for key, value in og['properties']]))
        modified = list(filter(None, [value if 'article:modified_time' in key else None for key, value in og['properties']]))
        published = list(filter(None, [value if 'article:published_time' in key else None for key, value in og['properties']]))
        if len(modified):
            s['date'] = modified[0]
        if len(published):
            s['date'] = published[0]
        if len(titles):
            s['title'] = titles[0]

    for md in data['microdata']:
        if md['type'] in ['https://schema.org/NewsArticle', 'http://schema.org/NewsArticle']:
            props = md['properties']
            s['title'] = props['headline']
            if props.get('dateModified'):
                s['date'] = props['dateModified']
            if props.get('datePublished'):
                s['date'] = props['datePublished']
            if 'author' in props and props['author']:
                s['author'] = props['author']['properties']['name']

    for ld in data['json-ld']:
        if '@type' in ld and ld['@type'] in ['Article', 'NewsArticle']:
            s['title'] = ld['headline']
            if ld.get('dateModified'):
                s['date'] = ld['dateModified']
            if ld.get('datePublished'):
                s['date'] = ld['datePublished']
            if 'author' in ld and ld['author']:
                if 'name' in ld['author']:
                    s['author'] = ld['author']['name']
                elif len(ld['author']):
                    s['author'] = ld['author'][0]['name']
        if '@graph' in ld:
            for gld in ld['@graph']:
                if '@type' in gld and gld['@type'] in ['Article', 'NewsArticle']:
                    s['title'] = gld['headline']
                    if gld.get('dateModified'):
                        s['date'] = gld['dateModified']
                    if gld.get('datePublished'):
                        s['date'] = gld['datePublished']

    return s
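
# convert a scraped comment (and its children, recursively) into the
# internal format; returns False for comments with no author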
def comment(i):
    if 'author' not in i:
        return False

    c = {}
    c['author'] = i.get('author', '')
    c['score'] = i.get('points', 0)
    c['date'] = unix(i.get('date', 0))
    c['text'] = clean(i.get('text', '') or '')
    c['comments'] = [comment(j) for j in i.get('children', [])]
    c['comments'] = list(filter(bool, c['comments']))
    return c
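
# recursively count the comments in a thread that still have an author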
def comment_count(i):
    alive = 1 if i['author'] else 0
    return sum([comment_count(c) for c in i['comments']]) + alive
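
# base class for the feed sources below; story() fetches and parses a
# single article page, subclasses supply feed() to list article URLs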
class _Base:
    def __init__(self, url, tz=None):
        self.url = url
        self.tz = tz

    def feed(self, excludes=None):
        return []

    def story(self, ref):
        markup = xml(lambda x: ref)
        if not markup:
            return False

        s = {}
        s['author_link'] = ''
        s['score'] = 0
        s['comments'] = []
        s['num_comments'] = 0
        s['link'] = ref
        s['url'] = ref
        s['date'] = 0

        # look for a favicon, preferring explicitly sized icons
        soup = BeautifulSoup(markup, features='html.parser')
        icon32 = soup.find_all('link', rel="icon", href=True, sizes="32x32")
        icon16 = soup.find_all('link', rel="icon", href=True, sizes="16x16")
        favicon = soup.find_all('link', rel="shortcut icon", href=True)
        others = soup.find_all('link', rel="icon", href=True)
        icons = icon32 + icon16 + favicon + others
        base_url = '/'.join(ref.split('/')[:3])
        icons = list(set([i.get('href') for i in icons]))
        icons = [i if i.startswith('http') else base_url + i for i in icons]
        if icons:
            s['icon'] = icons[0]

        data = extruct.extract(markup)
        s = parse_extruct(s, data)
        if s['date']:
            s['date'] = unix(s['date'], tz=self.tz)

        if 'disqus' in markup:
            try:
                s['comments'] = declutter.get_comments(ref)
                s['comments'] = list(filter(bool, s['comments']))
                s['num_comments'] = sum([comment_count(c) for c in s['comments']])
            except KeyboardInterrupt:
                raise
            except:
                pass

        if not s['date']:
            return False

        return s
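
# pick the best available date element out of a sitemap entry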
def get_sitemap_date(a):
    if a.find('lastmod'):
        return a.find('lastmod').text
    if a.find('news:publication_date'):
        return a.find('news:publication_date').text
    if a.find('ns2:publication_date'):
        return a.find('ns2:publication_date').text
    return ''
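
# feed source that reads article URLs from an XML sitemap, recursing
# into nested sitemap indexes and keeping only recent entries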
class Sitemap(_Base):
    def __init__(self, url, tz=None):
        self.tz = tz
        self.sitemap_url = url

    def _feed(self, feed_url, excludes=None):
        too_old = datetime.now().timestamp() - settings.MAX_STORY_AGE
        markup = xml(lambda x: feed_url)
        if not markup: return []
        soup = BeautifulSoup(markup, features='lxml')
        if soup.find('sitemapindex'):
            sitemap = soup.find('sitemapindex').findAll('sitemap')
        else:
            sitemap = soup.find('urlset').findAll('url')

        links = list(filter(None, [a if a.find('loc') else None for a in sitemap]))
        links = list(filter(None, [a if get_sitemap_date(a) else None for a in links]))
        links = list(filter(None, [a if unix(get_sitemap_date(a)) > too_old else None for a in links]))
        links.sort(key=lambda a: unix(get_sitemap_date(a)), reverse=True)

        links = [x.find('loc').text for x in links] or []
        links = list(set(links))
        if excludes:
            links = list(filter(None, [None if any(e in link for e in excludes) else link for link in links]))

        # nested sitemaps get crawled recursively; everything else is an article
        feed_urls = list(filter(None, [l if l.endswith(".xml") else None for l in links]))
        urls = list(set(links) - set(feed_urls))
        for url in feed_urls:
            urls += self._feed(url, excludes)
        return urls

    def feed(self, excludes=None):
        return self._feed(self.sitemap_url, excludes)
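
# feed source that scrapes article links from a category/section page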
class Category(_Base):
    def __init__(self, url, tz=None):
        self.tz = tz
        self.category_url = url
        self.base_url = '/'.join(url.split('/')[:3])

    def feed(self, excludes=None):
        markup = xml(lambda x: self.category_url)
        if not markup: return []
        soup = BeautifulSoup(markup, features='html.parser')
        links = soup.find_all('a', href=True)
        links = [link.get('href') for link in links]
        links = [f"{self.base_url}{link}" if link.startswith('/') else link for link in links]
        links = list(filter(None, [link if link.startswith(self.category_url) else None for link in links]))
        links = list(filter(None, [link if link != self.category_url else None for link in links]))
        links = list(set(links))
        if excludes:
            links = list(filter(None, [None if any(e in link for e in excludes) else link for link in links]))
        return links

# scratchpad so I can quickly develop the parser
if __name__ == '__main__':
    print("Sitemap: The Spinoff")
    site = Sitemap("https://thespinoff.co.nz/sitemap.xml")
    excludes = [
        'thespinoff.co.nz/sitemap-misc.xml',
        'thespinoff.co.nz/sitemap-authors.xml',
        'thespinoff.co.nz/sitemap-tax-category.xml',
    ]
    posts = site.feed(excludes)
    print(posts[:5])
    print(site.story(posts[0]))