forked from tanner/qotnews
Compare commits
24 Commits
2f730c1f52 ... 2439c113b3

SHA1:
2439c113b3, 0f5e28136d, bb1413b586, 0a27c0da1f, fe01ea52e5, 3daae5fa1b,
25caee17d6, c1b6349771, 54a4c7e55a, b12a3570b0, 0bfa920654, 9341b4d966,
a2e5faa3b5, a86eb98c1a, abf7f0a802, d288546d6f, cc130942ca, f0b14408d4,
e1830a589b, 32bc3b906b, f5e65632b8, 1fe524207e, dc3d17b171, 539350a83d

@@ -73,6 +73,13 @@ def get_stories_by_url(url):
         filter(Story.meta['url'].as_string() == url).\
         order_by(Story.meta['date'].desc())
 
+def get_ref_by_sid(sid):
+    session = Session()
+    x = session.query(Reflist).\
+        filter(Reflist.sid == sid).\
+        first()
+    return dict(ref=x.ref, sid=x.sid, source=x.source, urlref=x.urlref)
+
 def get_reflist():
     session = Session()
     q = session.query(Reflist).order_by(Reflist.rid.desc())
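
A minimal sketch of how the new get_ref_by_sid helper is meant to be used (the story id 'ABCDE' is made up; see update-story.py below for the real call site):

    import database

    database.init()
    item = database.get_ref_by_sid('ABCDE')  # hypothetical story id
    if item:
        print(item['source'], item['ref'], item['urlref'])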

@@ -53,7 +53,7 @@ class Category(Base):
 # scratchpad so I can quickly develop the parser
 if __name__ == '__main__':
     print("Category: RadioNZ")
-    site = Category("https://www.rnz.co.nz/news/")
+    site = Category({ 'url': "https://www.rnz.co.nz/news/" })
     excludes = [
         'rnz.co.nz/news/sport',
         'rnz.co.nz/weather',

@@ -61,12 +61,12 @@ if __name__ == '__main__':
     ]
     posts = site.feed(excludes)
     print(posts[:5])
-    print(site.story(posts[0]))
+    print(site.story(posts[0][0], posts[0][1]))
 
     print("Category: Newsroom")
-    site = Category("https://www.newsroom.co.nz/news/", tz='Pacific/Auckland')
+    site = Category({ 'url': "https://www.newsroom.co.nz/news/", 'tz': 'Pacific/Auckland'})
     posts = site.feed()
     print(posts[:5])
-    print(site.story(posts[0]))
+    print(site.story(posts[0][0], posts[0][1]))

@@ -40,7 +40,7 @@ def api(route, ref=None):
     return False
 
 def feed():
-    return [str(x) for x in api(API_TOPSTORIES) or []]
+    return ['hn:'+str(x) for x in api(API_TOPSTORIES) or []]
 
 def comment(i):
     if 'author' not in i:

@@ -60,6 +60,7 @@ def comment_count(i):
     return sum([comment_count(c) for c in i['comments']]) + alive
 
 def story(ref):
+    ref = ref.replace('hn:', '')
     r = api(API_ITEM, ref)
     if not r: return False
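
The 'hn:' prefix round-trips between feed() and story(); a quick illustrative check (the item ids are invented):

    refs = ['hn:' + str(x) for x in [27000001, 27000002]]  # what feed() now returns
    ref = refs[0].replace('hn:', '')                       # what story() now strips
    print(refs[0], '->', ref)                              # hn:27000001 -> 27000001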

@@ -76,7 +76,7 @@ class Sitemap(Base):
 # scratchpad so I can quickly develop the parser
 if __name__ == '__main__':
     print("Sitemap: The Spinoff")
-    site = Sitemap("https://thespinoff.co.nz/sitemap.xml")
+    site = Sitemap({ 'url': "https://thespinoff.co.nz/sitemap.xml" })
    excludes = [
         'thespinoff.co.nz/sitemap-misc.xml',
         'thespinoff.co.nz/sitemap-authors.xml',

@@ -84,16 +84,18 @@ if __name__ == '__main__':
     ]
     posts = site.feed(excludes)
     print(posts[:5])
-    print(site.story(posts[0]))
+    print(site.story(posts[0][0], posts[0][1]))
 
     print("Sitemap: Newshub")
-    site = Sitemap([
+    site = Sitemap({
+        'url': [
         'https://www.newshub.co.nz/home/politics.gnewssitemap.xml',
         'https://www.newshub.co.nz/home/new-zealand.gnewssitemap.xml',
         'https://www.newshub.co.nz/home/world.gnewssitemap.xml',
         'https://www.newshub.co.nz/home/money.gnewssitemap.xml',
-    ])
+        ],
+    })
     posts = site.feed()
     print(posts[:5])
-    print(site.story(posts[0]))
-    print(site.story(posts[:-1]))
+    print(site.story(posts[0][0], posts[0][1]))

@@ -10,6 +10,10 @@ if __name__ == '__main__':
 import requests
 from datetime import datetime
 
+import settings
+from misc.time import unix
+from misc.metadata import get_icons
+from misc.api import xml, json
 from utils import clean
 
 SUBSTACK_REFERER = 'https://substack.com'

@@ -22,32 +26,6 @@ def api_comments(post_id, base_url):
 def api_stories(x, base_url):
     return f"{base_url}/api/v1/archive?sort=new&search=&offset=0&limit=100"
 
-def unix(date_str):
-    return int(datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S.%fZ').timestamp())
-
-def api(route, ref=None, referer=None):
-    headers = {'Referer': referer} if referer else None
-    try:
-        r = requests.get(route(ref), headers=headers, timeout=10)
-        if r.status_code != 200:
-            raise Exception('Bad response code ' + str(r.status_code))
-        return r.json()
-    except KeyboardInterrupt:
-        raise
-    except BaseException as e:
-        logging.error('Problem hitting Substack API: {}, trying again'.format(str(e)))
-
-    try:
-        r = requests.get(route(ref), headers=headers, timeout=20)
-        if r.status_code != 200:
-            raise Exception('Bad response code ' + str(r.status_code))
-        return r.json()
-    except KeyboardInterrupt:
-        raise
-    except BaseException as e:
-        logging.error('Problem hitting Substack API: {}'.format(str(e)))
-        return False
-
 def comment(i):
     if 'body' not in i:
         return False

@@ -66,14 +44,25 @@ class Publication:
     def __init__(self, domain):
         self.BASE_DOMAIN = domain
 
+    def ref_prefix(self, ref):
+        return f"{self.BASE_DOMAIN}/#id:{ref}"
+
+    def strip_ref_prefix(self, ref):
+        return ref.replace(f"{self.BASE_DOMAIN}/#id:", '')
+
     def feed(self):
-        stories = api(lambda x: api_stories(x, self.BASE_DOMAIN), referer=self.BASE_DOMAIN)
+        too_old = datetime.now().timestamp() - settings.MAX_STORY_AGE
+        stories = json(lambda x: api_stories(x, self.BASE_DOMAIN), headers={'Referer': self.BASE_DOMAIN})
         if not stories: return []
         stories = list(filter(None, [i if i.get("audience") == "everyone" else None for i in stories]))
-        return [str(i.get("id")) for i in stories or []]
+        stories = list(filter(None, [i if unix(i.get('post_date')) > too_old else None for i in stories]))
+        stories.sort(key=lambda a: unix(a.get('post_date')), reverse=True)
+
+        return [self.ref_prefix(str(i.get("id"))) for i in stories or []]
 
     def story(self, ref):
-        stories = api(lambda x: api_stories(x, self.BASE_DOMAIN), referer=self.BASE_DOMAIN)
+        ref = self.strip_ref_prefix(ref)
+        stories = json(lambda x: api_stories(x, self.BASE_DOMAIN), headers={'Referer': self.BASE_DOMAIN})
         if not stories: return False
         stories = list(filter(None, [i if i.get("audience") == "everyone" else None for i in stories]))
         stories = list(filter(None, [i if str(i.get('id')) == ref else None for i in stories]))
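
The publication-scoped ref format can be checked in isolation; a minimal sketch (the domain and id are made up):

    BASE_DOMAIN = 'https://example.substack.com'    # hypothetical publication
    ref = f"{BASE_DOMAIN}/#id:12345"                # what ref_prefix() builds
    item = ref.replace(f"{BASE_DOMAIN}/#id:", '')   # what strip_ref_prefix() recovers
    print(ref, '->', item)                          # .../#id:12345 -> 12345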

@@ -94,7 +83,7 @@ class Publication:
         s['title'] = r.get('title', '')
         s['link'] = r.get('canonical_url', '')
         s['url'] = r.get('canonical_url', '')
-        comments = api(lambda x: api_comments(x, self.BASE_DOMAIN), r.get('id'), referer=self.BASE_DOMAIN)
+        comments = json(lambda x: api_comments(x, self.BASE_DOMAIN), r.get('id'), headers={'Referer': self.BASE_DOMAIN})
         s['comments'] = [comment(i) for i in comments.get('comments')]
         s['comments'] = list(filter(bool, s['comments']))
         s['num_comments'] = r.get('comment_count', 0)

@@ -104,6 +93,12 @@ class Publication:
         s['author'] = authors[0].get('name')
         s['author_link'] = authors[0].get('link')
 
+        markup = xml(lambda x: s['link'])
+        if markup:
+            icons = get_icons(markup, url=s['link'])
+            if icons:
+                s['icon'] = icons[0]
+
         return s
 
     def _bylines(self, b):

@@ -116,14 +111,28 @@
 
 
 class Top:
+    def ref_prefix(self, base_url, ref):
+        return f"{base_url}/#id:{ref}"
+
+    def strip_ref_prefix(self, ref):
+        if '/#id:' in ref:
+            base_url, item = ref.split(f"/#id:")
+            return item
+        return ref
+
     def feed(self):
-        stories = api(SUBSTACK_API_TOP_POSTS, referer=SUBSTACK_REFERER)
+        too_old = datetime.now().timestamp() - settings.MAX_STORY_AGE
+        stories = json(SUBSTACK_API_TOP_POSTS, headers={'Referer': SUBSTACK_REFERER})
         if not stories: return []
         stories = list(filter(None, [i if i.get("audience") == "everyone" else None for i in stories]))
-        return [str(i.get("id")) for i in stories or []]
+        stories = list(filter(None, [i if unix(i.get('post_date')) > too_old else None for i in stories]))
+        stories.sort(key=lambda a: unix(a.get('post_date')), reverse=True)
+        stories = [self.ref_prefix(str(i.get("pub").get("base_url")), str(i.get("id"))) for i in stories]
+        return stories
 
     def story(self, ref):
-        stories = api(SUBSTACK_API_TOP_POSTS, referer=SUBSTACK_REFERER)
+        ref = self.strip_ref_prefix(ref)
+        stories = json(SUBSTACK_API_TOP_POSTS, headers={'Referer': SUBSTACK_REFERER})
         if not stories: return False
         stories = list(filter(None, [i if i.get("audience") == "everyone" else None for i in stories]))
         stories = list(filter(None, [i if str(i.get('id')) == ref else None for i in stories]))
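
Top's refs embed each publication's base_url, so its strip_ref_prefix has to split rather than replace; a tiny sketch with a made-up ref:

    ref = 'https://example.substack.com/#id:67890'  # hypothetical top-posts ref
    base_url, item = ref.split('/#id:')
    print(base_url, item)                           # https://example.substack.com 67890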

@@ -146,7 +155,7 @@ class Top:
         s['title'] = r.get('title', '')
         s['link'] = r.get('canonical_url', '')
         s['url'] = r.get('canonical_url', '')
-        comments = api(lambda x: api_comments(x, base_url), r.get('id'), referer=SUBSTACK_REFERER)
+        comments = json(lambda x: api_comments(x, base_url), r.get('id'), headers={'Referer': SUBSTACK_REFERER})
         s['comments'] = [comment(i) for i in comments.get('comments')]
         s['comments'] = list(filter(bool, s['comments']))
         s['num_comments'] = r.get('comment_count', 0)

@@ -5,13 +5,16 @@ logging.basicConfig(
 
 import requests
 
-USER_AGENT = "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
-FORWARD_IP = '66.249.66.1'
+GOOGLEBOT_USER_AGENT = "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
+GOOGLEBOT_IP = '66.249.66.1'
+TIMEOUT = 30
 
-def xml(route, ref=None):
+def xml(route, ref=None, headers=dict(), use_googlebot=True):
     try:
-        headers = {'User-Agent': USER_AGENT, 'X-Forwarded-For': FORWARD_IP}
-        r = requests.get(route(ref), headers=headers, timeout=5)
+        if use_googlebot:
+            headers['User-Agent'] = GOOGLEBOT_USER_AGENT
+            headers['X-Forwarded-For'] = GOOGLEBOT_IP
+        r = requests.get(route(ref), headers=headers, timeout=TIMEOUT)
         if r.status_code != 200:
             raise Exception('Bad response code ' + str(r.status_code))
         return r.text

@@ -21,10 +24,12 @@ def xml(route, ref=None):
         logging.error('Problem hitting URL: {}'.format(str(e)))
         return False
 
-def json(route, ref=None):
+def json(route, ref=None, headers=dict(), use_googlebot=True):
     try:
-        headers = {'User-Agent': USER_AGENT, 'X-Forwarded-For': FORWARD_IP}
-        r = requests.get(route(ref), headers=headers, timeout=5)
+        if use_googlebot:
+            headers['User-Agent'] = GOOGLEBOT_USER_AGENT
+            headers['X-Forwarded-For'] = GOOGLEBOT_IP
+        r = requests.get(route(ref), headers=headers, timeout=TIMEOUT)
         if r.status_code != 200:
             raise Exception('Bad response code ' + str(r.status_code))
         return r.json()
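
Call sites pass a route callable plus optional extra headers, as the substack.py hunks above now do; an illustrative sketch (example.com stands in for a real feed):

    from misc.api import json, xml

    data = json(lambda x: 'https://example.com/api/v1/archive',
                headers={'Referer': 'https://example.com'})
    page = xml(lambda x: 'https://example.com/', use_googlebot=False)

Note that the headers=dict() default is a single shared dict that the googlebot branch mutates in place, so spoofed headers persist across calls; callers that care should pass their own dict.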

apiserver/misc/icons.py (new file, 14 lines)

@@ -0,0 +1,14 @@
+from bs4 import BeautifulSoup
+
+def get_icons(markup):
+    soup = BeautifulSoup(markup, features='html.parser')
+    icon32 = soup.find_all('link', rel="icon", href=True, sizes="32x32")
+    icon16 = soup.find_all('link', rel="icon", href=True, sizes="16x16")
+    favicon = soup.find_all('link', rel="shortcut icon", href=True)
+    others = soup.find_all('link', rel="icon", href=True)
+    icons = icon32 + icon16 + favicon + others
+    base_url = '/'.join(urlref.split('/')[:3])
+    icons = list(set([i.get('href') for i in icons]))
+    icons = [i if i.startswith('http') else base_url + i for i in icons]
+
+    return icons

@@ -1,4 +1,19 @@
+from bs4 import BeautifulSoup
+
+def get_icons(markup, url):
+    soup = BeautifulSoup(markup, features='html.parser')
+    icon32 = soup.find_all('link', rel="icon", href=True, sizes="32x32")
+    icon16 = soup.find_all('link', rel="icon", href=True, sizes="16x16")
+    favicon = soup.find_all('link', rel="shortcut icon", href=True)
+    others = soup.find_all('link', rel="icon", href=True)
+    icons = icon32 + icon16 + favicon + others
+    base_url = '/'.join(url.split('/')[:3])
+    icons = list(set([i.get('href') for i in icons]))
+    icons = [i if i.startswith('http') else base_url + i for i in icons]
+
+    return icons
+
 def parse_extruct(s, data):
     rdfa_keys = {
         'title': [
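
A minimal check of get_icons against hand-written markup (the icon path and page URL are invented):

    from misc.metadata import get_icons

    markup = '<link rel="icon" sizes="32x32" href="/icon32.png">'
    print(get_icons(markup, url='https://example.com/post/1'))
    # ['https://example.com/icon32.png']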

@@ -11,9 +11,10 @@ import extruct
 
 import settings
 from utils import clean
-from misc.metadata import parse_extruct
+from misc.metadata import parse_extruct, get_icons
 from misc.time import unix
 from misc.api import xml
+import misc.stuff as stuff
 
 def comment(i):
     if 'author' not in i:

@@ -68,16 +69,7 @@ class Base:
         s['url'] = urlref
         s['date'] = 0
 
-        soup = BeautifulSoup(markup, features='html.parser')
-        icon32 = soup.find_all('link', rel="icon", href=True, sizes="32x32")
-        icon16 = soup.find_all('link', rel="icon", href=True, sizes="16x16")
-        favicon = soup.find_all('link', rel="shortcut icon", href=True)
-        others = soup.find_all('link', rel="icon", href=True)
-        icons = icon32 + icon16 + favicon + others
-        base_url = '/'.join(urlref.split('/')[:3])
-        icons = list(set([i.get('href') for i in icons]))
-        icons = [i if i.startswith('http') else base_url + i for i in icons]
-
+        icons = get_icons(markup, url=urlref)
         if icons:
             s['icon'] = icons[0]

@@ -89,13 +81,18 @@ class Base:
         if 'disqus' in markup:
             try:
                 s['comments'] = declutter.get_comments(urlref)
-                c['comments'] = list(filter(bool, c['comments']))
+                s['comments'] = list(filter(bool, s['comments']))
                 s['num_comments'] = comment_count(s['comments'])
             except KeyboardInterrupt:
                 raise
             except:
                 pass
 
+        if urlref.startswith('https://www.stuff.co.nz'):
+            s['comments'] = stuff.get_comments(urlref)
+            s['comments'] = list(filter(bool, s['comments']))
+            s['num_comments'] = len(s['comments'])
+
         if not s['date']:
             return False
         return s

apiserver/misc/stuff.py (new file, 64 lines)

@@ -0,0 +1,64 @@
+import re
+from bs4 import BeautifulSoup
+
+if __name__ == '__main__':
+    import sys
+    sys.path.insert(0,'.')
+
+from misc.time import unix
+from misc.api import xml
+
+def _soup_get_text(soup):
+    if not soup: return None
+    if soup.text: return soup.text
+
+    s = soup.find(text=lambda tag: isinstance(tag, bs4.CData))
+    if s and s.string: return s.string.strip()
+    return None
+
+def _parse_comment(soup):
+    c = {
+        'author': '',
+        'authorLink': '',
+        'score': 0,
+        'date': 0,
+        'text': '',
+        'comments': [],
+    }
+
+    if soup.find('link'):
+        title = _soup_get_text(soup.find('link'))
+        if title and 'By:' in title:
+            c['author'] = title.strip('By:').strip()
+    if soup.find('dc:creator'):
+        c['author'] = _soup_get_text(soup.find('dc:creator'))
+    if soup.find('link'):
+        c['authorLink'] = _soup_get_text(soup.find('link'))
+    if soup.find('description'):
+        c['text'] = _soup_get_text(soup.find('description'))
+    if soup.find('pubdate'):
+        c['date'] = unix(soup.find('pubdate').text)
+    elif soup.find('pubDate'):
+        c['date'] = unix(soup.find('pubDate').text)
+
+    return c
+
+def get_comments(url):
+    regex = r"https:\/\/www\.stuff\.co\.nz\/(.*\/\d+)/[^\/]+"
+    p = re.compile(regex).match(url)
+    path = p.groups()[0]
+    comment_url = f'https://comments.us1.gigya.com/comments/rss/6201101/Stuff/stuff/{path}'
+    markup = xml(lambda x: comment_url)
+    if not markup: return []
+    soup = BeautifulSoup(markup, features='html.parser')
+    comments = soup.find_all('item')
+    if not comments: return []
+    comments = [_parse_comment(c) for c in comments]
+    return comments
+
+
+# scratchpad so I can quickly develop the parser
+if __name__ == '__main__':
+    comments = get_comments('https://www.stuff.co.nz/life-style/homed/houses/123418468/dear-jacinda-we-need-to-talk-about-housing')
+    print(len(comments))
+    print(comments[:5])
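
The Gigya comment-feed URL derivation can be sketched on its own, using the same article URL as the scratchpad:

    import re

    url = 'https://www.stuff.co.nz/life-style/homed/houses/123418468/dear-jacinda-we-need-to-talk-about-housing'
    path = re.match(r"https:\/\/www\.stuff\.co\.nz\/(.*\/\d+)/[^\/]+", url).groups()[0]
    print(f'https://comments.us1.gigya.com/comments/rss/6201101/Stuff/stuff/{path}')
    # ends with .../life-style/homed/houses/123418468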

@@ -4,9 +4,9 @@ logging.basicConfig(
     level=logging.DEBUG)
 import requests
 
-DECLUTTER_API = 'https://declutter.1j.nz/details'
-DECLUTTER_COMMENT_API = 'https://declutter.1j.nz/comments'
-TIMEOUT = 30
+DECLUTTER_API = 'https://declutter.1j.nz/headless/details'
+DECLUTTER_COMMENT_API = 'https://declutter.1j.nz/headless/comments'
+TIMEOUT = 90
 
 
 def get_html(url):

@@ -3,15 +3,14 @@ logging.basicConfig(
     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
     level=logging.DEBUG)
 import requests
-from settings import READER_PORT
+from settings import HEADLESS_READER_PORT
 
-READ_API = 'http://127.0.0.1:{}/headless/details'.format(READER_PORT or 3000)
-READ_COMMENT__API = 'http://127.0.0.1:{}/headless/comments'.format(READER_PORT or 3000)
-TIMEOUT = 60
+READ_API = 'http://127.0.0.1:{}/headless/details'.format(HEADLESS_READER_PORT or 33843)
+READ_COMMENT__API = 'http://127.0.0.1:{}/headless/comments'.format(HEADLESS_READER_PORT or 33843)
+TIMEOUT = 90
 
 def get_html(url):
-    logging.info(f"Headless Browser Scraper: {url}")
+    logging.info(f"Headless Scraper: {url}")
     details = get_details(url)
     if not details:
         return ''

@@ -3,9 +3,9 @@ logging.basicConfig(
     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
     level=logging.DEBUG)
 import requests
-from settings import READER_PORT
+from settings import SIMPLE_READER_PORT
 
-READ_API = 'http://127.0.0.1:{}/simple/details'.format(READER_PORT or 3000)
+READ_API = 'http://127.0.0.1:{}/simple/details'.format(SIMPLE_READER_PORT or 33843)
 TIMEOUT = 20
 
 def get_html(url):

@@ -99,6 +99,8 @@ def submit():
 def story(sid):
     story = database.get_story(sid)
     if story:
+        related = []
+        if story.meta['url']:
+            related = database.get_stories_by_url(story.meta['url'])
+            related = [r.meta for r in related]
+        res = Response(json.dumps({"story": story.data, "related": related}))

@@ -153,6 +155,7 @@ def _add_new_refs():
             database.put_ref(ref, nid, source, urlref)
             logging.info('Added ref ' + ref)
         except database.IntegrityError:
             logging.info('Unable to add ref ' + ref)
+            continue
 
 def _update_current_story(item):

@@ -165,8 +168,11 @@ def _update_current_story(item):
 
     valid = feed.update_story(story, urlref=item['urlref'])
     if valid:
-        database.put_story(story)
-        search.put_story(story)
+        try:
+            database.put_story(story)
+            search.put_story(story)
+        except database.IntegrityError:
+            logging.info('Unable to add story with ref ' + ref)
     else:
         database.del_ref(item['ref'])
         logging.info('Removed ref {}'.format(item['ref']))

@@ -6,7 +6,8 @@ MAX_STORY_AGE = 3*24*60*60
 
 SCRAPERS = ['headless', 'outline', 'declutter', 'simple']
 API_PORT = 33842
-READER_PORT = 3000
+SIMPLE_READER_PORT = 33843
+HEADLESS_READER_PORT = 33843
 
 # Feed Lengths
 # Number of top items from each site to pull

apiserver/update-story.py (new file, 48 lines)

@@ -0,0 +1,48 @@
+import logging
+logging.basicConfig(
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+    level=logging.INFO)
+
+import sys
+import json
+import requests
+
+import database
+import feed
+import search
+
+database.init()
+search.init()
+
+def _update_current_story(story, item):
+    logging.info('Updating story: {}'.format(str(story['ref'])))
+
+    if story.get('url', ''):
+        story['text'] = ''
+
+    valid = feed.update_story(story, urlref=item['urlref'])
+    if valid:
+        database.put_story(story)
+        search.put_story(story)
+    else:
+        database.del_ref(item['ref'])
+        logging.info('Removed ref {}'.format(item['ref']))
+
+if __name__ == '__main__':
+    if len(sys.argv) == 2:
+        sid = sys.argv[1]
+    else:
+        print('Usage: python update-story.py [story id]')
+        exit(1)
+
+    item = database.get_ref_by_sid(sid)
+
+    if item:
+        story = database.get_story(item['sid']).data
+        if story:
+            print('Updating story:')
+            _update_current_story(story, item)
+        else:
+            print('Story not found. Exiting.')
+    else:
+        print('Story not found. Exiting.')

@@ -9,7 +9,7 @@ import string
 from bleach.sanitizer import Cleaner
 
 def gen_rand_id():
-    return ''.join(random.choice(string.ascii_uppercase) for _ in range(4))
+    return ''.join(random.choice(string.ascii_uppercase) for _ in range(5))
 
 def render_md(md):
     if md:
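
Going from four to five uppercase letters grows the story-id space from 26^4 = 456,976 to 26^5 = 11,881,376, which presumably makes random-id collisions (and the IntegrityError handled above) far less likely; quick check:

    print(26**4, 26**5)  # 456976 11881376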

@@ -1 +1 @@
-Subproject commit 50a94df7283e31680c5d94dd666bab58aea2e475
+Subproject commit d3d5fc74acf0be8a49e2772b42ab59278d1a3e81

@@ -71,7 +71,7 @@ class App extends React.Component {
           <Route path='/search' component={Results} />
           <Route path='/:id' exact render={(props) => <Article {...props} cache={this.cache} />} />
-          <Route path='/:id/c' exact render={(props) => <Comments {...props} cache={this.cache} />} />
+          <Route path='/:id/c' exact render={(props) => <Comments {...props} cache={this.cache} key={props.match.params.id} />} />
         </Switch>
 
         <ForwardDot />

@@ -102,7 +102,8 @@ span.source {
   font-size: 1.4rem;
 }
 
-.article h3, .article h4 {
+.article h3,
+.article h4 {
   font-size: 1.3rem;
 }

@@ -111,7 +112,8 @@ span.source {
   height: auto;
 }
 
-.article figure, .article video {
+.article figure,
+.article video {
   width: 100%;
   height: auto;
   margin: 0;

@@ -144,7 +146,7 @@ span.source {
 }
 
 .story-text {
-  font: 1.2rem/1.5 'Apparatus SIL', sans-serif;
+  font: 1.2rem/1.5 "Apparatus SIL", sans-serif;
   margin-top: 1em;
 }

@@ -196,7 +198,7 @@ span.source {
 }
 
 .toggleDot .button {
-  font: 2rem/1 'icomoon';
+  font: 2rem/1 "icomoon";
   position: relative;
   top: 0.5rem;
   left: 0.55rem;

@@ -214,7 +216,7 @@ span.source {
 }
 
 .forwardDot .button {
-  font: 2.5rem/1 'icomoon';
+  font: 2.5rem/1 "icomoon";
   position: relative;
   top: 0.25rem;
   left: 0.3rem;

@@ -223,3 +225,7 @@ span.source {
 .search form {
   display: inline;
 }
+
+.indented {
+  padding: 0 0 0 1rem;
+}

@@ -1,7 +1,7 @@
 import React from 'react';
 import { Helmet } from 'react-helmet';
 import localForage from 'localforage';
-import { sourceLink, infoLine, ToggleDot } from '../utils.js';
+import { sourceLink, infoLine, otherDiscussions, ToggleDot } from '../utils.js';
 
 class Article extends React.Component {
   constructor(props) {

@@ -14,6 +14,7 @@ class Article extends React.Component {
 
     this.state = {
       story: cache[id] || false,
+      related: [],
      error: false,
       pConv: [],
     };

@@ -22,21 +23,16 @@ class Article extends React.Component {
   componentDidMount() {
     const id = this.props.match ? this.props.match.params.id : 'CLOL';
 
-    localForage.getItem(id)
-      .then(
-        (value) => {
-          if (value) {
-            this.setState({ story: value });
-          }
-        }
-      );
+    localForage.getItem(id).then((value) => value ? this.setState({ story: value }) : null);
+    localForage.getItem(`related-${id}`).then((value) => value ? this.setState({ related: value }) : null);
 
     fetch('/api/' + id)
       .then(res => res.json())
       .then(
         (result) => {
-          this.setState({ story: result.story });
+          this.setState({ story: result.story, related: result.related });
           localForage.setItem(id, result.story);
+          localForage.setItem(`related-${id}`, result.related);
         },
         (error) => {
           this.setState({ error: true });

@@ -51,6 +47,7 @@ class Article extends React.Component {
   render() {
     const id = this.props.match ? this.props.match.params.id : 'CLOL';
     const story = this.state.story;
+    const related = this.state.related.filter(r => r.id != id);
     const error = this.state.error;
     const pConv = this.state.pConv;
     let nodes = null;

@@ -77,6 +74,7 @@ class Article extends React.Component {
           </div>
 
           {infoLine(story)}
+          {otherDiscussions(related)}
 
           {nodes ?
             <div className='story-text'>

@@ -4,9 +4,9 @@ import { HashLink } from 'react-router-hash-link';
 import { Helmet } from 'react-helmet';
 import moment from 'moment';
 import localForage from 'localforage';
-import { infoLine, ToggleDot } from '../utils.js';
+import { infoLine, otherDiscussions, ToggleDot } from '../utils.js';
 
-class Article extends React.Component {
+class Comments extends React.Component {
   constructor(props) {
     super(props);
 

@@ -17,6 +17,7 @@ class Article extends React.Component {
 
     this.state = {
       story: cache[id] || false,
+      related: [],
       error: false,
       collapsed: [],
       expanded: [],

@@ -26,24 +27,21 @@ class Article extends React.Component {
   componentDidMount() {
     const id = this.props.match.params.id;
 
-    localForage.getItem(id)
-      .then(
-        (value) => {
-          this.setState({ story: value });
-        }
-      );
+    localForage.getItem(id).then((value) => this.setState({ story: value }));
+    localForage.getItem(`related-${id}`).then((value) => value ? this.setState({ related: value }) : null);
 
     fetch('/api/' + id)
       .then(res => res.json())
       .then(
         (result) => {
-          this.setState({ story: result.story }, () => {
+          this.setState({ story: result.story, related: result.related }, () => {
             const hash = window.location.hash.substring(1);
             if (hash) {
               document.getElementById(hash).scrollIntoView();
             }
           });
           localForage.setItem(id, result.story);
+          localForage.setItem(`related-${id}`, result.related);
         },
         (error) => {
           this.setState({ error: true });

@@ -110,6 +108,7 @@ class Article extends React.Component {
   render() {
     const id = this.props.match.params.id;
     const story = this.state.story;
+    const related = this.state.related.filter(r => r.id != id);
     const error = this.state.error;
 
     return (

@@ -128,6 +127,7 @@ class Article extends React.Component {
           </div>
 
           {infoLine(story)}
+          {otherDiscussions(related)}
 
           <div className='comments'>
             {story.comments.map(c => this.displayComment(story, c, 0))}

@@ -142,4 +142,4 @@ class Article extends React.Component {
   }
 }
 
-export default Article;
+export default Comments;

@@ -30,10 +30,13 @@ class Feed extends React.Component {
       stories.forEach((x, i) => {
         fetch('/api/' + x.id)
           .then(res => res.json())
-          .then(({ story }) => {
-            localForage.setItem(x.id, story)
-              .then(console.log('preloaded', x.id, x.title));
+          .then(({ story, related }) => {
+            Promise.all([
+              localForage.setItem(x.id, story),
+              localForage.setItem(`related-${x.id}`, related)
+            ]).then(console.log('preloaded', x.id, x.title));
             this.props.updateCache(x.id, story);
+            this.props.updateCache(`related-${x.id}`, related);
           }, error => { }
         );
       });

@@ -15,18 +15,37 @@ export const sourceLink = (story) => {
 
 export const infoLine = (story) => (
   <div className="info">
-    {story.score} points by <a href={story.author_link}>{story.author}</a>
+    {story.score} points by {story.author_link ? <a href={story.author_link}>{story.author}</a> : story.author}
     &#8203; {moment.unix(story.date).fromNow()}
     &#8203; on <a href={story.link}>{story.source}</a> | &#8203;
     <Link
       className={story.num_comments > 99 ? "hot" : ""}
-      to={"/" + story.id + "/c"}
-    >
+      to={"/" + story.id + "/c"}>
       {story.num_comments} comment{story.num_comments !== 1 && "s"}
     </Link>
   </div>
 );
 
+export const otherDiscussions = (related) => {
+  const stories = related.filter(r => r.num_comments > 0);
+  if (!stories.length) {
+    return null;
+  }
+  return (
+    <div className='related indented info'>
+      <span>Other discussions: </span>
+      {stories.map((story, i) =>
+        <span id={story.id}>
+          {i !== 0 ? <> • </> : <></>}
+          <Link className={story.num_comments > 99 ? "hot" : ""} to={"/" + story.id + "/c"}>
+            {story.source} ({story.num_comments} comment{story.num_comments !== 1 && "s"})
+          </Link>
+        </span>
+      )}
+    </div>
+  );
+}
+
 export class ToggleDot extends React.Component {
   render() {
     const id = this.props.id;

webclient/yarn.lock: 5321 lines changed (diff suppressed because it is too large)