Compare commits: master...44b8b36547 (59 commits)

44b8b36547  4f49684194  1d78b1c592  0374794536  943a1cfa4f  9cee370a25
5efc6ef2d3  4ec50e20cb  c1b7877f4b  7b8cbfc9b9  bfa4108a8e  0bd0d40a31
4e04595415  006db2960c  1f063f0dac  1658346aa9  2dbc702b40  1c4764e67d
ee49d2021e  c391c50ab1  095f0d549a  c21c71667e  c3a2c91a11  0f39446a61
351059aab1  4488e2c292  afda5b635c  0fc1a44d2b  9fff1b9e46  16b59f6c67
939f4775a7  9bfc6fc6fa  6ea9844d00  1318259d3d  98a0c2257c  e6976db25d
9edc8b7cca  33e21e7f30  892a99eca6  d718d05a04  d1795eb1b8  9f4ff4acf0
db6aad84ec  29f8a8b8cc  abf8589e02  b759f46582  736cdc8576  244d416f6e
5f98a2e76a  0567cdfd9b  4f90671cec  e63a1456a5  76f1d57702  de80389ed0
4e64cf682a  c5fe5d25a0  283a2b1545  0d6a86ace2  f23bf628e0
apiserver/database.py
@@ -1,9 +1,9 @@
-import json
+from datetime import datetime, timedelta
 
 from sqlalchemy import create_engine, Column, String, ForeignKey, Integer
 from sqlalchemy.ext.declarative import declarative_base
 from sqlalchemy.orm import sessionmaker
 from sqlalchemy.exc import IntegrityError
+from sqlalchemy.types import JSON
 
 engine = create_engine('sqlite:///data/qotnews.sqlite')
 Session = sessionmaker(bind=engine)
@@ -15,8 +15,8 @@ class Story(Base):
 
     sid = Column(String(16), primary_key=True)
     ref = Column(String(16), unique=True)
-    meta_json = Column(String)
-    full_json = Column(String)
+    meta = Column(JSON)
+    data = Column(JSON)
     title = Column(String)
 
 class Reflist(Base):
@@ -36,19 +36,21 @@ def get_story(sid):
 
 def put_story(story):
     story = story.copy()
-    full_json = json.dumps(story)
+    data = {}
+    data.update(story)
 
-    story.pop('text', None)
-    story.pop('comments', None)
-    meta_json = json.dumps(story)
+    meta = {}
+    meta.update(story)
+    meta.pop('text', None)
+    meta.pop('comments', None)
 
    try:
         session = Session()
         s = Story(
             sid=story['id'],
             ref=story['ref'],
-            full_json=full_json,
-            meta_json=meta_json,
+            data=data,
+            meta=meta,
             title=story.get('title', None),
         )
         session.merge(s)
@@ -63,18 +65,26 @@ def get_story_by_ref(ref):
     session = Session()
     return session.query(Story).filter(Story.ref==ref).first()
 
-def get_reflist(amount):
+def get_stories_by_url(url):
     session = Session()
-    q = session.query(Reflist).order_by(Reflist.rid.desc()).limit(amount)
+    return session.query(Story).\
+        filter(Story.title != None).\
+        filter(Story.meta['url'].as_string() == url).\
+        order_by(Story.meta['date'].desc())
+
+def get_reflist():
+    session = Session()
+    q = session.query(Reflist).order_by(Reflist.rid.desc())
     return [dict(ref=x.ref, sid=x.sid, source=x.source) for x in q.all()]
 
-def get_stories(amount):
+def get_stories(maxage=60*60*24*2):
+    time = datetime.now().timestamp() - maxage
     session = Session()
-    q = session.query(Reflist, Story.meta_json).\
-        order_by(Reflist.rid.desc()).\
+    q = session.query(Reflist, Story.meta).\
         join(Story).\
         filter(Story.title != None).\
-        limit(amount)
+        filter(Story.meta['date'].as_integer() > time).\
+        order_by(Story.meta['date'].desc())
     return [x[1] for x in q]
 
 def put_ref(ref, sid, source):
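With meta and data now stored as SQLAlchemy JSON columns instead of pre-serialized strings, callers can filter on individual fields inside the database. A minimal sketch of what the new accessors allow, assuming the schema above (the URL is a placeholder):

    import database

    # get_stories_by_url() filters on Story.meta['url'] inside SQLite itself,
    # which is only possible because meta is a JSON column, not a dumped string.
    stories = database.get_stories_by_url('https://example.com/some-article')
    for story in stories:
        print(story.sid, story.meta['date'], story.meta['title'])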
apiserver/feed.py
@@ -6,61 +6,81 @@ logging.basicConfig(
 import requests
 import time
 from bs4 import BeautifulSoup
+import itertools
 
 import settings
-from feeds import hackernews, reddit, tildes, manual
+from feeds import hackernews, reddit, tildes, substack, manual, news
+from scrapers import outline, declutter, local
 
-OUTLINE_API = 'https://api.outline.com/v3/parse_article'
-READ_API = 'http://127.0.0.1:33843'
-
 INVALID_DOMAINS = ['youtube.com', 'bloomberg.com', 'wsj.com']
-TWO_DAYS = 60*60*24*2
 
-def list():
-    feed = []
+substacks = {}
+for key, value in settings.SUBSTACK.items():
+    substacks[key] = substack.Publication(value['url'])
+categories = {}
+for key, value in settings.CATEGORY.items():
+    categories[key] = news.Category(value['url'], value.get('tz'))
+sitemaps = {}
+for key, value in settings.SITEMAP.items():
+    sitemaps[key] = news.Sitemap(value['url'], value.get('tz'))
+
+def get_list():
+    feeds = {}
+
     if settings.NUM_HACKERNEWS:
-        feed += [(x, 'hackernews') for x in hackernews.feed()[:settings.NUM_HACKERNEWS]]
+        feeds['hackernews'] = [(x, 'hackernews') for x in hackernews.feed()[:settings.NUM_HACKERNEWS]]
 
     if settings.NUM_REDDIT:
-        feed += [(x, 'reddit') for x in reddit.feed()[:settings.NUM_REDDIT]]
+        feeds['reddit'] = [(x, 'reddit') for x in reddit.feed()[:settings.NUM_REDDIT]]
 
     if settings.NUM_TILDES:
-        feed += [(x, 'tildes') for x in tildes.feed()[:settings.NUM_TILDES]]
+        feeds['tildes'] = [(x, 'tildes') for x in tildes.feed()[:settings.NUM_TILDES]]
 
+    if settings.NUM_SUBSTACK:
+        feeds['substack'] = [(x, 'substack') for x in substack.top.feed()[:settings.NUM_SUBSTACK]]
+
+    for key, publication in substacks.items():
+        count = settings.SUBSTACK[key]['count']
+        feeds[key] = [(x, key) for x in publication.feed()[:count]]
+
+    for key, sites in categories.items():
+        count = settings.CATEGORY[key].get('count') or 0
+        excludes = settings.CATEGORY[key].get('excludes')
+        tz = settings.CATEGORY[key].get('tz')
+        feeds[key] = [(x, key) for x in sites.feed(excludes)[:count]]
+
+    for key, sites in sitemaps.items():
+        count = settings.SITEMAP[key].get('count') or 0
+        excludes = settings.SITEMAP[key].get('excludes')
+        feeds[key] = [(x, key) for x in sites.feed(excludes)[:count]]
+
+    values = feeds.values()
+    feed = itertools.chain.from_iterable(itertools.zip_longest(*values, fillvalue=None))
+    feed = list(filter(None, feed))
     return feed
 
 def get_article(url):
-    try:
-        params = {'source_url': url}
-        headers = {'Referer': 'https://outline.com/'}
-        r = requests.get(OUTLINE_API, params=params, headers=headers, timeout=20)
-        if r.status_code == 429:
-            logging.info('Rate limited by outline, sleeping 30s and skipping...')
-            time.sleep(30)
-            return ''
-        if r.status_code != 200:
-            raise Exception('Bad response code ' + str(r.status_code))
-        html = r.json()['data']['html']
-        if 'URL is not supported by Outline' in html:
-            raise Exception('URL not supported by Outline')
-        return html
-    except KeyboardInterrupt:
-        raise
-    except BaseException as e:
-        logging.error('Problem outlining article: {}'.format(str(e)))
+    scrapers = {
+        'declutter': declutter,
+        'outline': outline,
+        'local': local,
+    }
+    available = settings.SCRAPERS or ['local']
+    if 'local' not in available:
+        available += ['local']
 
-    logging.info('Trying our server instead...')
-    try:
-        r = requests.post(READ_API, data=dict(url=url), timeout=20)
-        if r.status_code != 200:
-            raise Exception('Bad response code ' + str(r.status_code))
-        return r.text
-    except KeyboardInterrupt:
-        raise
-    except BaseException as e:
-        logging.error('Problem getting article: {}'.format(str(e)))
+    for scraper in available:
+        if scraper not in scrapers.keys():
+            continue
+        try:
+            html = scrapers[scraper].get_html(url)
+            if html:
+                return html
+        except KeyboardInterrupt:
+            raise
+        except:
+            pass
     return ''
 
 def get_content_type(url):
     try:
@@ -87,6 +107,14 @@ def update_story(story, is_manual=False):
         res = reddit.story(story['ref'])
     elif story['source'] == 'tildes':
         res = tildes.story(story['ref'])
+    elif story['source'] == 'substack':
+        res = substack.top.story(story['ref'])
+    elif story['source'] in categories.keys():
+        res = categories[story['source']].story(story['ref'])
+    elif story['source'] in sitemaps.keys():
+        res = sitemaps[story['source']].story(story['ref'])
+    elif story['source'] in substacks.keys():
+        res = substacks[story['source']].story(story['ref'])
     elif story['source'] == 'manual':
         res = manual.story(story['ref'])
 
@@ -96,7 +124,7 @@ def update_story(story, is_manual=False):
         logging.info('Story not ready yet')
         return False
 
-    if story['date'] and not is_manual and story['date'] + TWO_DAYS < time.time():
+    if story['date'] and not is_manual and story['date'] + settings.MAX_STORY_AGE < time.time():
         logging.info('Story too old, removing')
         return False
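get_list() now gathers each source into its own list and interleaves them round-robin with itertools.zip_longest, so a single prolific source can no longer crowd the others out of the top of the feed. The pattern in isolation, with toy values:

    import itertools

    feeds = {
        'hackernews': ['h1', 'h2', 'h3'],
        'reddit': ['r1', 'r2'],
        'tildes': ['t1'],
    }
    # zip_longest pads shorter lists with None, chain flattens the rounds,
    # and filter drops the padding.
    feed = itertools.chain.from_iterable(
        itertools.zip_longest(*feeds.values(), fillvalue=None))
    print(list(filter(None, feed)))
    # ['h1', 'r1', 't1', 'h2', 'r2', 'h3']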
apiserver/feeds/news.py (new file, 244 lines)

import logging
logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=logging.DEBUG)

if __name__ == '__main__':
    import sys
    sys.path.insert(0,'.')

import requests
from datetime import datetime
from bs4 import BeautifulSoup
from scrapers import declutter
import dateutil.parser
import extruct
import pytz

from utils import clean

USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0'
#USER_AGENT = "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"

def unix(date_str, tz=None):
    try:
        dt = dateutil.parser.parse(date_str)
        if tz:
            dt = pytz.timezone(tz).localize(dt)
        return int(dt.timestamp())
    except:
        pass
    return 0


def xml(route, ref=None):
    try:
        headers = {'User-Agent': USER_AGENT, 'X-Forwarded-For': '66.249.66.1'}
        r = requests.get(route(ref), headers=headers, timeout=5)
        if r.status_code != 200:
            raise Exception('Bad response code ' + str(r.status_code))
        return r.text
    except KeyboardInterrupt:
        raise
    except BaseException as e:
        logging.error('Problem hitting URL: {}'.format(str(e)))
        return False


def parse_extruct(s, data):
    for rdfa in data['rdfa']:
        for key, props in rdfa.items():
            if 'http://ogp.me/ns#title' in props:
                for values in props['http://ogp.me/ns#title']:
                    s['title'] = values['@value']
            if 'http://ogp.me/ns/article#modified_time' in props:
                for values in props['http://ogp.me/ns/article#modified_time']:
                    s['date'] = values['@value']
            if 'http://ogp.me/ns/article#published_time' in props:
                for values in props['http://ogp.me/ns/article#published_time']:
                    s['date'] = values['@value']

    for og in data['opengraph']:
        titles = list(filter(None, [value if 'og:title' in key else None for key, value in og['properties']]))
        modified = list(filter(None, [value if 'article:modified_time' in key else None for key, value in og['properties']]))
        published = list(filter(None, [value if 'article:published_time' in key else None for key, value in og['properties']]))
        if len(modified):
            s['date'] = modified[0]
        if len(published):
            s['date'] = published[0]
        if len(titles):
            s['title'] = titles[0]

    for md in data['microdata']:
        if md['type'] == 'https://schema.org/NewsArticle':
            props = md['properties']
            s['title'] = props['headline']
            if props['dateModified']:
                s['date'] = props['dateModified']
            if props['datePublished']:
                s['date'] = props['datePublished']
            if 'author' in props and props['author']:
                s['author'] = props['author']['properties']['name']

    for ld in data['json-ld']:
        if '@type' in ld and ld['@type'] in ['Article', 'NewsArticle']:
            s['title'] = ld['headline']
            if ld['dateModified']:
                s['date'] = ld['dateModified']
            if ld['datePublished']:
                s['date'] = ld['datePublished']
            if 'author' in ld and ld['author']:
                s['author'] = ld['author']['name']
        if '@graph' in ld:
            for gld in ld['@graph']:
                if '@type' in gld and gld['@type'] in ['Article', 'NewsArticle']:
                    s['title'] = gld['headline']
                    if gld['dateModified']:
                        s['date'] = gld['dateModified']
                    if gld['datePublished']:
                        s['date'] = gld['datePublished']

    return s

def comment(i):
    if 'author' not in i:
        return False

    c = {}
    c['author'] = i.get('author', '')
    c['score'] = i.get('points', 0)
    c['date'] = unix(i.get('date', 0))
    c['text'] = clean(i.get('text', '') or '')
    c['comments'] = [comment(j) for j in i['children']]
    c['comments'] = list(filter(bool, c['comments']))
    return c

def comment_count(i):
    alive = 1 if i['author'] else 0
    return sum([comment_count(c) for c in i['comments']]) + alive

class _Base:
    def __init__(self, url, tz=None):
        self.url = url
        self.tz = tz

    def feed(self, excludes=None):
        return []

    def story(self, ref):
        markup = xml(lambda x: ref)
        if not markup:
            return False

        s = {}
        s['author_link'] = ''
        s['score'] = 0
        s['comments'] = []
        s['num_comments'] = 0
        s['link'] = ref
        s['url'] = ref
        s['date'] = 0

        soup = BeautifulSoup(markup, features='html.parser')
        icon32 = soup.find_all('link', rel="icon", href=True, sizes="32x32")
        icon16 = soup.find_all('link', rel="icon", href=True, sizes="16x16")
        favicon = soup.find_all('link', rel="shortcut icon", href=True)
        others = soup.find_all('link', rel="icon", href=True)
        icons = icon32 + icon16 + favicon + others
        base_url = '/'.join(ref.split('/')[:3])
        icons = list(set([i.get('href') for i in icons]))
        icons = [i if i.startswith('http') else base_url + i for i in icons]

        if icons:
            s['icon'] = icons[0]

        data = extruct.extract(markup)
        s = parse_extruct(s, data)
        if s['date']:
            s['date'] = unix(s['date'], tz=self.tz)

        if 'disqus' in markup:
            try:
                s['comments'] = declutter.get_comments(ref)
                s['comments'] = list(filter(bool, s['comments']))
                s['num_comments'] = comment_count(s['comments'])
            except KeyboardInterrupt:
                raise
            except:
                pass

        if not s['date']:
            return False
        return s

def get_sitemap_date(a):
    if a.find('lastmod'):
        return a.find('lastmod').text
    if a.find('news:publication_date'):
        return a.find('news:publication_date').text
    if a.find('ns2:publication_date'):
        return a.find('ns2:publication_date').text
    return ''

class Sitemap(_Base):
    def __init__(self, url, tz=None):
        self.tz = tz
        self.sitemap_url = url

    def feed(self, excludes=None):
        markup = xml(lambda x: self.sitemap_url)
        if not markup: return []
        soup = BeautifulSoup(markup, features='lxml')
        sitemap = soup.find('urlset').findAll('url')

        links = list(filter(None, [a if a.find('loc') else None for a in sitemap]))
        links = list(filter(None, [a if get_sitemap_date(a) else None for a in links]))
        links.sort(key=lambda a: unix(get_sitemap_date(a)), reverse=True)
        links = [x.find('loc').text for x in links] or []
        links = list(set(links))
        if excludes:
            links = list(filter(None, [None if any(e in link for e in excludes) else link for link in links]))
        return links


class Category(_Base):
    def __init__(self, url, tz=None):
        self.tz = tz
        self.category_url = url
        self.base_url = '/'.join(url.split('/')[:3])

    def feed(self, excludes=None):
        markup = xml(lambda x: self.category_url)
        if not markup: return []
        soup = BeautifulSoup(markup, features='html.parser')
        links = soup.find_all('a', href=True)
        links = [link.get('href') for link in links]
        links = [f"{self.base_url}{link}" if link.startswith('/') else link for link in links]
        links = list(filter(None, [link if link.startswith(self.category_url) else None for link in links]))
        links = list(filter(None, [link if link != self.category_url else None for link in links]))
        links = list(set(links))
        if excludes:
            links = list(filter(None, [None if any(e in link for e in excludes) else link for link in links]))
        return links


# scratchpad so I can quickly develop the parser
if __name__ == '__main__':
    print("Sitemap: Stuff")
    site = Sitemap("https://www.stuff.co.nz/sitemap/news/sitemap.xml")
    posts = site.feed()
    print(posts[:5])
    print(site.story(posts[0]))

    print("Category: RadioNZ Te Ao Māori")
    site = Category("https://www.rnz.co.nz/news/te-manu-korihi/")
    posts = site.feed()
    print(posts[:5])
    print(site.story(posts[0]))

    print("Sitemap: Newsroom")
    site = Sitemap("https://www.newsroom.co.nz/sitemap.xml")
    posts = site.feed()
    print(posts[:5])
    print(site.story(posts[0]))
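parse_extruct() above walks the four syntaxes extruct returns (RDFa, OpenGraph, microdata, JSON-LD) and keeps the last title and date it finds. A toy payload in extruct's shape, with made-up values, to show the flow:

    data = {
        'rdfa': [],
        'microdata': [],
        'json-ld': [],
        'opengraph': [{'properties': [
            ('og:title', 'Example headline'),
            ('article:published_time', '2020-11-01T09:00:00+13:00'),
        ]}],
    }
    s = parse_extruct({'title': '', 'date': 0}, data)
    # s['title'] == 'Example headline'
    # s['date'] == '2020-11-01T09:00:00+13:00' (unix() converts it afterwards)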
apiserver/feeds/substack.py (new file, 165 lines)

import logging
logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=logging.DEBUG)

if __name__ == '__main__':
    import sys
    sys.path.insert(0,'.')

import requests
from datetime import datetime

from utils import clean

SUBSTACK_REFERER = 'https://substack.com'
SUBSTACK_API_TOP_POSTS = lambda x: "https://substack.com/api/v1/reader/top-posts"

def author_link(author_id, base_url):
    return f"{base_url}/people/{author_id}"
def api_comments(post_id, base_url):
    return f"{base_url}/api/v1/post/{post_id}/comments?all_comments=true&sort=best_first"
def api_stories(x, base_url):
    return f"{base_url}/api/v1/archive?sort=new&search=&offset=0&limit=100"

def unix(date_str):
    return int(datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S.%fZ').timestamp())

def api(route, ref=None, referer=None):
    headers = {'Referer': referer} if referer else None
    try:
        r = requests.get(route(ref), headers=headers, timeout=10)
        if r.status_code != 200:
            raise Exception('Bad response code ' + str(r.status_code))
        return r.json()
    except KeyboardInterrupt:
        raise
    except BaseException as e:
        logging.error('Problem hitting Substack API: {}, trying again'.format(str(e)))

    try:
        r = requests.get(route(ref), headers=headers, timeout=20)
        if r.status_code != 200:
            raise Exception('Bad response code ' + str(r.status_code))
        return r.json()
    except KeyboardInterrupt:
        raise
    except BaseException as e:
        logging.error('Problem hitting Substack API: {}'.format(str(e)))
        return False

def comment(i):
    if 'body' not in i:
        return False

    c = {}
    c['date'] = unix(i.get('date'))
    c['author'] = i.get('name', '')
    c['score'] = i.get('reactions').get('❤')
    c['text'] = clean(i.get('body', '') or '')
    c['comments'] = [comment(j) for j in i['children']]
    c['comments'] = list(filter(bool, c['comments']))

    return c

class Publication:
    def __init__(self, domain):
        self.BASE_DOMAIN = domain

    def feed(self):
        stories = api(lambda x: api_stories(x, self.BASE_DOMAIN), referer=self.BASE_DOMAIN)
        if not stories: return []
        stories = list(filter(None, [i if i.get("audience") == "everyone" else None for i in stories]))
        return [str(i.get("id")) for i in stories or []]

    def story(self, ref):
        stories = api(lambda x: api_stories(x, self.BASE_DOMAIN), referer=self.BASE_DOMAIN)
        if not stories: return False
        stories = list(filter(None, [i if i.get("audience") == "everyone" else None for i in stories]))
        stories = list(filter(None, [i if str(i.get('id')) == ref else None for i in stories]))

        if len(stories) == 0:
            return False

        r = stories[0]
        if not r:
            return False

        s = {}
        s['author'] = ''
        s['author_link'] = ''

        s['date'] = unix(r.get('post_date'))
        s['score'] = r.get('reactions').get('❤')
        s['title'] = r.get('title', '')
        s['link'] = r.get('canonical_url', '')
        s['url'] = r.get('canonical_url', '')
        comments = api(lambda x: api_comments(x, self.BASE_DOMAIN), r.get('id'), referer=self.BASE_DOMAIN)
        s['comments'] = [comment(i) for i in comments.get('comments')]
        s['comments'] = list(filter(bool, s['comments']))
        s['num_comments'] = r.get('comment_count', 0)

        authors = list(filter(None, [self._bylines(byline) for byline in r.get('publishedBylines')]))
        if len(authors):
            s['author'] = authors[0].get('name')
            s['author_link'] = authors[0].get('link')

        return s

    def _bylines(self, b):
        if 'id' not in b:
            return None
        a = {}
        a['name'] = b.get('name')
        a['link'] = author_link(b.get('id'), self.BASE_DOMAIN)
        return a


class Top:
    def feed(self):
        stories = api(SUBSTACK_API_TOP_POSTS, referer=SUBSTACK_REFERER)
        if not stories: return []
        stories = list(filter(None, [i if i.get("audience") == "everyone" else None for i in stories]))
        return [str(i.get("id")) for i in stories or []]

    def story(self, ref):
        stories = api(SUBSTACK_API_TOP_POSTS, referer=SUBSTACK_REFERER)
        if not stories: return False
        stories = list(filter(None, [i if i.get("audience") == "everyone" else None for i in stories]))
        stories = list(filter(None, [i if str(i.get('id')) == ref else None for i in stories]))

        if len(stories) == 0:
            return False

        r = stories[0]
        if not r:
            return False

        s = {}
        pub = r.get('pub')
        base_url = pub.get('base_url')
        s['author'] = pub.get('author_name')
        s['author_link'] = author_link(pub.get('author_id'), base_url)

        s['date'] = unix(r.get('post_date'))
        s['score'] = r.get('score')
        s['title'] = r.get('title', '')
        s['link'] = r.get('canonical_url', '')
        s['url'] = r.get('canonical_url', '')
        comments = api(lambda x: api_comments(x, base_url), r.get('id'), referer=SUBSTACK_REFERER)
        s['comments'] = [comment(i) for i in comments.get('comments')]
        s['comments'] = list(filter(bool, s['comments']))
        s['num_comments'] = r.get('comment_count', 0)

        return s

top = Top()

# scratchpad so I can quickly develop the parser
if __name__ == '__main__':
    top_posts = top.feed()
    print(top.story(top_posts[0]))

    webworm = Publication("https://www.webworm.co/")
    posts = webworm.feed()
    print(webworm.story(posts[0]))
apiserver/requirements.txt
@@ -4,6 +4,7 @@ certifi==2020.6.20
 chardet==3.0.4
 click==7.1.2
 commonmark==0.9.1
+extruct==0.10.0
 Flask==1.1.2
 Flask-Cors==3.0.8
 gevent==20.6.2
@@ -11,11 +12,13 @@ greenlet==0.4.16
 idna==2.10
 itsdangerous==1.1.0
 Jinja2==2.11.2
+lxml==4.6.1
 MarkupSafe==1.1.1
 packaging==20.4
 praw==6.4.0
 prawcore==1.4.0
 pyparsing==2.4.7
+pytz==2020.4
 requests==2.24.0
 six==1.15.0
 soupsieve==2.0.1
@@ -27,3 +30,4 @@ websocket-client==0.57.0
 Werkzeug==1.0.1
 zope.event==4.4
 zope.interface==5.1.0
+python-dateutil==2.8.1
apiserver/scrapers/declutter.py (new file, 41 lines)

import logging
logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=logging.DEBUG)
import requests

DECLUTTER_API = 'https://declutter.1j.nz/details'
DECLUTTER_COMMENT_API = 'https://declutter.1j.nz/comments'
TIMEOUT = 30


def get_html(url):
    logging.info(f"Declutter Scraper: {url}")
    details = get_details(url)
    if not details:
        return ''
    return details['content']

def get_details(url):
    try:
        r = requests.post(DECLUTTER_API, data=dict(url=url), timeout=TIMEOUT)
        if r.status_code != 200:
            raise Exception('Bad response code ' + str(r.status_code))
        return r.json()
    except KeyboardInterrupt:
        raise
    except BaseException as e:
        logging.error('Problem decluttering article: {}'.format(str(e)))
        return None

def get_comments(url):
    try:
        r = requests.post(DECLUTTER_COMMENT_API, data=dict(url=url), timeout=TIMEOUT)
        if r.status_code != 200:
            raise Exception('Bad response code ' + str(r.status_code))
        return r.json()
    except KeyboardInterrupt:
        raise
    except BaseException as e:
        logging.error('Problem getting comments for article: {}'.format(str(e)))
        return None
apiserver/scrapers/local.py (new file, 27 lines)

import logging
logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=logging.DEBUG)
import requests

READ_API = 'http://127.0.0.1:33843/details'
TIMEOUT = 20

def get_html(url):
    logging.info(f"Local Scraper: {url}")
    details = get_details(url)
    if not details:
        return ''
    return details['content']

def get_details(url):
    try:
        r = requests.post(READ_API, data=dict(url=url), timeout=TIMEOUT)
        if r.status_code != 200:
            raise Exception('Bad response code ' + str(r.status_code))
        return r.json()
    except KeyboardInterrupt:
        raise
    except BaseException as e:
        logging.error('Problem getting article: {}'.format(str(e)))
        return None
apiserver/scrapers/outline.py (new file, 37 lines)

import logging
logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=logging.DEBUG)
import requests
import time  # needed by the rate-limit sleep below; missing from the original listing

OUTLINE_REFERER = 'https://outline.com/'
OUTLINE_API = 'https://api.outline.com/v3/parse_article'
TIMEOUT = 20

def get_html(url):
    details = get_details(url)
    if not details:
        return ''
    return details['html']

def get_details(url):
    try:
        logging.info(f"Outline Scraper: {url}")
        params = {'source_url': url}
        headers = {'Referer': OUTLINE_REFERER}
        r = requests.get(OUTLINE_API, params=params, headers=headers, timeout=TIMEOUT)
        if r.status_code == 429:
            logging.info('Rate limited by outline, sleeping 30s and skipping...')
            time.sleep(30)
            return None
        if r.status_code != 200:
            raise Exception('Bad response code ' + str(r.status_code))
        data = r.json()['data']
        if 'URL is not supported by Outline' in data['html']:
            raise Exception('URL not supported by Outline')
        return data
    except KeyboardInterrupt:
        raise
    except BaseException as e:
        logging.error('Problem outlining article: {}'.format(str(e)))
        return None
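All three scraper modules expose the same pair of functions, get_html(url) returning a string and get_details(url) returning a parsed dict or None, which is what lets feed.get_article() try them interchangeably. A sketch of that duck-typed fallback, mirroring the loop in feed.py (first_html is a hypothetical helper, not part of the codebase):

    from scrapers import declutter, outline, local

    def first_html(url, order=('declutter', 'outline', 'local')):
        modules = {'declutter': declutter, 'outline': outline, 'local': local}
        for name in order:
            html = modules[name].get_html(url)  # each returns '' on failure
            if html:
                return html
        return ''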
apiserver/search.py
@@ -39,10 +39,7 @@ def update_attributes():
        r = requests.post(MEILI_URL + 'indexes/qotnews/settings/searchable-attributes', json=json, timeout=2)
        if r.status_code != 202:
            raise Exception('Bad response code ' + str(r.status_code))
-        return r.json()
-        r = requests.delete(MEILI_URL + 'indexes/qotnews/settings/displayed-attributes', timeout=2)
-        if r.status_code != 202:
-            raise Exception('Bad response code ' + str(r.status_code))
+        requests.delete(MEILI_URL + 'indexes/qotnews/settings/displayed-attributes', timeout=2)
         return r.json()
     except KeyboardInterrupt:
         raise
apiserver/server.py
@@ -15,6 +15,7 @@ import traceback
 import time
 from urllib.parse import urlparse, parse_qs
 
+import settings
 import database
 import search
 import feed
@@ -27,9 +28,6 @@ from flask_cors import CORS
 database.init()
 search.init()
 
-FEED_LENGTH = 75
-news_index = 0
-
 def new_id():
     nid = gen_rand_id()
     while database.get_story(nid):
@@ -42,9 +40,8 @@ cors = CORS(flask_app)
 
 @flask_app.route('/api')
 def api():
-    stories = database.get_stories(FEED_LENGTH)
-    # hacky nested json
-    res = Response('{"stories":[' + ','.join(stories) + ']}')
+    stories = database.get_stories(settings.MAX_STORY_AGE)
+    res = Response(json.dumps({"stories": stories}))
     res.headers['content-type'] = 'application/json'
     return res
@@ -102,8 +99,9 @@ def submit():
 def story(sid):
     story = database.get_story(sid)
     if story:
-        # hacky nested json
-        res = Response('{"story":' + story.full_json + '}')
+        related = database.get_stories_by_url(story.meta['url'])
+        related = [r.meta for r in related]
+        res = Response(json.dumps({"story": story.data, "related": related}))
         res.headers['content-type'] = 'application/json'
         return res
     else:
@@ -127,7 +125,7 @@ def static_story(sid):
 
     story = database.get_story(sid)
     if not story: return abort(404)
-    story = json.loads(story.full_json)
+    story = story.data
 
     score = story['score']
     num_comments = story['num_comments']
@@ -146,52 +144,49 @@ def static_story(sid):
 
 http_server = WSGIServer(('', 33842), flask_app)
 
-def feed_thread():
-    global news_index
+def _add_new_refs():
+    for ref, source in feed.get_list():
+        if database.get_story_by_ref(ref):
+            continue
+        try:
+            nid = new_id()
+            database.put_ref(ref, nid, source)
+            logging.info('Added ref ' + ref)
+        except database.IntegrityError:
+            continue
+
+def _update_current_story(item):
+    try:
+        story = database.get_story(item['sid']).data
+    except AttributeError:
+        story = dict(id=item['sid'], ref=item['ref'], source=item['source'])
+
+    logging.info('Updating story: {}'.format(str(story['ref'])))
+
+    valid = feed.update_story(story)
+    if valid:
+        database.put_story(story)
+        search.put_story(story)
+    else:
+        database.del_ref(item['ref'])
+        logging.info('Removed ref {}'.format(item['ref']))
+
+def feed_thread():
+    ref_list = []
     try:
         while True:
             # onboard new stories
-            if news_index == 0:
-                for ref, source in feed.list():
-                    if database.get_story_by_ref(ref):
-                        continue
-                    try:
-                        nid = new_id()
-                        database.put_ref(ref, nid, source)
-                        logging.info('Added ref ' + ref)
-                    except database.IntegrityError:
-                        continue
-
-                ref_list = database.get_reflist(FEED_LENGTH)
+            if not len(ref_list):
+                _add_new_refs()
+                ref_list = database.get_reflist()
 
             # update current stories
-            if news_index < len(ref_list):
-                item = ref_list[news_index]
-                try:
-                    story_json = database.get_story(item['sid']).full_json
-                    story = json.loads(story_json)
-                except AttributeError:
-                    story = dict(id=item['sid'], ref=item['ref'], source=item['source'])
-
-                logging.info('Updating story: ' + str(story['ref']) + ', index: ' + str(news_index))
-
-                valid = feed.update_story(story)
-                if valid:
-                    database.put_story(story)
-                    search.put_story(story)
-                else:
-                    database.del_ref(item['ref'])
-                    logging.info('Removed ref {}'.format(item['ref']))
-            else:
-                logging.info('Skipping index: ' + str(news_index))
+            if len(ref_list):
+                item = ref_list.pop(0)
+                _update_current_story(item)
 
             gevent.sleep(6)
 
-            news_index += 1
-            if news_index == FEED_LENGTH: news_index = 0
-
     except KeyboardInterrupt:
         logging.info('Ending feed thread...')
     except ValueError as e:
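The API now returns JSON built with json.dumps rather than hand-spliced strings, and the story endpoint gains a related list of other stories sharing the same URL. A client-side sketch of the new shapes, assuming the story route is /api/<sid> and the server is running locally on the port above:

    import requests

    r = requests.get('http://localhost:33842/api')
    stories = r.json()['stories']   # list of story meta dicts

    r = requests.get('http://localhost:33842/api/' + stories[0]['id'])
    payload = r.json()
    story = payload['story']        # full story data, including text and comments
    related = payload['related']    # meta of other stories with the same url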
apiserver/settings.py.example
@@ -1,12 +1,28 @@
 # QotNews settings
 # edit this file and save it as settings.py
 
+MAX_STORY_AGE = 3*24*60*60
+
 # Feed Lengths
 # Number of top items from each site to pull
 # set to 0 to disable that site
 NUM_HACKERNEWS = 15
 NUM_REDDIT = 10
 NUM_TILDES = 5
+NUM_SUBSTACK = 10
+
+SITEMAP = {}
+# SITEMAP['nzherald'] = { 'url': "https://www.nzherald.co.nz/arcio/news-sitemap/", 'count': 10},
+# SITEMAP['stuff'] = { 'url': "https://www.stuff.co.nz/sitemap.xml", 'count': 10},
+
+SUBSTACK = {}
+# SUBSTACK['webworm'] = { 'url': "https://www.webworm.co", 'count': 10},
+# SUBSTACK['the bulletin'] = { 'url': "https://thespinoff.substack.com", 'count': 10},
+
+CATEGORY = {}
+# CATEGORY['rnz national'] = { 'url': "https://www.rnz.co.nz/news/national", 'count': 10},
+
+SCRAPERS = ['declutter', 'outline', 'local']
+
 # Reddit account info
 # leave blank if not using Reddit
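One caution: the commented examples above end with '},', carried over from a dict literal, so uncommenting one verbatim would assign a one-element tuple rather than a dict entry. A corrected fragment for a hypothetical settings.py (the tz value is an example; feed.py reads it with value.get('tz')):

    SITEMAP = {}
    SITEMAP['stuff'] = { 'url': "https://www.stuff.co.nz/sitemap.xml", 'count': 10 }

    CATEGORY = {}
    CATEGORY['rnz national'] = {
        'url': "https://www.rnz.co.nz/news/national",
        'count': 10,
        'tz': 'Pacific/Auckland',
    }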
readerserver/constants.js (new file, 4 lines)

module.exports.headers = {
  'User-Agent': 'Googlebot/2.1 (+http://www.google.com/bot.html)',
  'X-Forwarded-For': '66.249.66.1',
};
readerserver/main.js
@@ -1,52 +1,29 @@
+const port = 33843;
 const express = require('express');
 const app = express();
-const port = 33843;
+const simple = require('./scraper/simple');
 
-const request = require('request');
-const JSDOM = require('jsdom').JSDOM;
-const { Readability } = require('readability');
 
 app.use(express.urlencoded({ extended: true }));
 
 app.get('/', (req, res) => {
-  res.send('<form method="POST" accept-charset="UTF-8"><input name="url"><button type="submit">SUBMIT</button></form>');
-});
+  // const routes = ['/', '/details', '/browser', '/browser/details', '/browser/comments'];
+  const routes = ['/', '/details'];
 
-const requestCallback = (url, res) => (error, response, body) => {
-  if (!error && response.statusCode == 200) {
-    console.log('Response OK.');
-
-    const doc = new JSDOM(body, {url: url});
-    const reader = new Readability(doc.window.document);
-    const article = reader.parse();
-
-    if (article && article.content) {
-      res.send(article.content);
-    } else {
-      res.sendStatus(404);
-    }
-  } else {
-    console.log('Response error:', error ? error.toString() : response.statusCode);
-    res.sendStatus(response ? response.statusCode : 404);
-  }
-};
-
-app.post('/', (req, res) => {
-  const url = req.body.url;
-  const requestOptions = {
-    url: url,
-    //headers: {'User-Agent': 'Googlebot/2.1 (+http://www.google.com/bot.html)'},
-    //headers: {'User-Agent': 'Twitterbot/1.0'},
-    headers: {
-      'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0',
-      'X-Forwarded-For': '66.249.66.1',
-    },
-  };
-
-  console.log('Parse request for:', url);
-
-  request(requestOptions, requestCallback(url, res));
+  const html = routes.map(route => `
+    <form method="POST" action="${route}" accept-charset="UTF-8">
+      <fieldset>
+        <legend>route: POST ${route}</legend>
+        <input name="url">
+        <button type="submit">SUBMIT</button>
+      </fieldset>
+    </form>`).join('<hr />');
+  res.send(html);
 });
+
+app.post('/', simple.scrape);
+app.post('/details', simple.details);
+// app.post('/browser', browser.scrape);
+// app.post('/browser/details', browser.details);
+// app.post('/browser/comments', browser.comments);
 
 app.listen(port, () => {
   console.log(`Example app listening on port ${port}!`);
readerserver/scraper/simple.js (new file, 41 lines)

const request = require('request');
const JSDOM = require('jsdom').JSDOM;
const { Readability } = require('readability');

const { headers } = require('../constants');

const options = url => ({
  url,
  headers,
});

const extract = (url, body) => {
  const doc = new JSDOM(body, { url: url });
  const reader = new Readability(doc.window.document);
  return reader.parse();
};


module.exports.scrape = (req, res) => request(options(req.body.url), (error, response, body) => {
  if (error || response.statusCode != 200) {
    console.log('Response error:', error ? error.toString() : response.statusCode);
    return res.sendStatus(response ? response.statusCode : 404);
  }
  const article = extract(req.body.url, body);
  if (article && article.content) {
    return res.send(article.content);
  }
  return res.sendStatus(404);
});

module.exports.details = (req, res) => request(options(req.body.url), (error, response, body) => {
  if (error || response.statusCode != 200) {
    console.log('Response error:', error ? error.toString() : response.statusCode);
    return res.sendStatus(response ? response.statusCode : 404);
  }
  const article = extract(req.body.url, body);
  if (article) {
    return res.send(article);
  }
  return res.sendStatus(404);
});
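The reader server's new /details route returns Readability's whole parse result as JSON instead of bare HTML, which is what apiserver/scrapers/local.py consumes. A quick check against a locally running instance (the article URL is a placeholder):

    import requests

    r = requests.post('http://127.0.0.1:33843/details',
                      data={'url': 'https://example.com/article'},
                      timeout=20)
    details = r.json()              # Readability's parse() output
    print(details['title'])
    print(details['content'][:200])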
webclient/src/Article.js
@@ -72,7 +72,7 @@ class Article extends React.Component {
   }
 
   displayComment(story, c, level) {
-    const cid = c.author+c.date;
+    const cid = c.author + c.date;
 
     const collapsed = this.state.collapsed.includes(cid);
     const expanded = this.state.expanded.includes(cid);
@@ -85,19 +85,22 @@ class Article extends React.Component {
         <div className='info'>
           <p>
             {c.author === story.author ? '[OP]' : ''} {c.author || '[Deleted]'}
-            {' '} | <HashLink to={'#'+cid} id={cid}>{moment.unix(c.date).fromNow()}</HashLink>
+            {' '} | <HashLink to={'#' + cid} id={cid}>{moment.unix(c.date).fromNow()}</HashLink>
 
-            {hidden || hasChildren &&
-              <span className='collapser pointer' onClick={() => this.collapseComment(cid)}>–</span>
-            }
+            {hasChildren && (
+              hidden ?
+                <span className='collapser expander pointer' onClick={() => this.expandComment(cid)}>+</span>
+                :
+                <span className='collapser pointer' onClick={() => this.collapseComment(cid)}>–</span>
+            )}
           </p>
         </div>
 
         <div className={collapsed ? 'text hidden' : 'text'} dangerouslySetInnerHTML={{ __html: c.text }} />
 
         {hidden && hasChildren ?
-          <div className='comment lined info pointer' onClick={() => this.expandComment(cid)}>[show {this.countComments(c)-1} more]</div>
+          <div className='comment lined info pointer' onClick={() => this.expandComment(cid)}>[show {this.countComments(c) - 1} more]</div>
           :
           c.comments.map(i => this.displayComment(story, i, level + 1))
         }
         </div>
@@ -130,7 +133,7 @@ class Article extends React.Component {
           {story.comments.map(c => this.displayComment(story, c, 0))}
         </div>
       </div>
       :
       <p>loading...</p>
     }
     <ToggleDot id={id} article={true} />
webclient/src/Feed.js
@@ -2,7 +2,7 @@ import React from 'react';
 import { Link } from 'react-router-dom';
 import { Helmet } from 'react-helmet';
 import localForage from 'localforage';
-import { sourceLink, infoLine, logos } from './utils.js';
+import { sourceLink, infoLine, getLogoUrl } from './utils.js';
 
 class Feed extends React.Component {
   constructor(props) {
@@ -22,20 +22,21 @@ class Feed extends React.Component {
         const updated = !this.state.stories || this.state.stories[0].id !== result.stories[0].id;
         console.log('updated:', updated);
 
-        this.setState({ stories: result.stories });
-        localStorage.setItem('stories', JSON.stringify(result.stories));
+        const { stories } = result;
+        this.setState({ stories });
+        localStorage.setItem('stories', JSON.stringify(stories));
 
         if (updated) {
           localForage.clear();
-          result.stories.forEach((x, i) => {
+          stories.forEach((x, i) => {
             fetch('/api/' + x.id)
               .then(res => res.json())
-              .then(result => {
-                localForage.setItem(x.id, result.story)
+              .then(({ story }) => {
+                localForage.setItem(x.id, story)
                   .then(console.log('preloaded', x.id, x.title));
-                this.props.updateCache(x.id, result.story);
-              }, error => {}
+                this.props.updateCache(x.id, story);
+              }, error => { }
             );
           });
         }
       },
@@ -61,7 +62,7 @@ class Feed extends React.Component {
           <div className='item' key={x.id}>
             <div className='title'>
               <Link className='link' to={'/' + x.id}>
-                <img className='source-logo' src={logos[x.source]} alt='source logo' /> {x.title}
+                <img className='source-logo' src={getLogoUrl(x)} alt='source logo' /> {x.title}
               </Link>
 
               <span className='source'>
@@ -73,7 +74,7 @@ class Feed extends React.Component {
           </div>
         )}
       </div>
       :
       <p>loading...</p>
     }
   </div>
webclient/src/Results.js
@@ -1,7 +1,7 @@
 import React from 'react';
 import { Link } from 'react-router-dom';
 import { Helmet } from 'react-helmet';
-import { sourceLink, infoLine, logos } from './utils.js';
+import { sourceLink, infoLine, getLogoUrl } from './utils.js';
 import AbortController from 'abort-controller';
 
 class Results extends React.Component {
@@ -68,7 +68,7 @@ class Results extends React.Component {
           <div className='item' key={x.id}>
             <div className='title'>
               <Link className='link' to={'/' + x.id}>
-                <img className='source-logo' src={logos[x.source]} alt='source logo' /> {x.title}
+                <img className='source-logo' src={getLogoUrl(x)} alt='source logo' /> {x.title}
               </Link>
 
               <span className='source'>
@@ -79,12 +79,12 @@ class Results extends React.Component {
               {infoLine(x)}
           </div>
         )
         :
         <p>none</p>
       }
     </div>
   </>
   :
   <p>loading...</p>
 }
 </div>
File diff suppressed because one or more lines are too long