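# Feed parser for Webworm (webworm.co), a newsletter hosted on Substack.
# Reads Substack's JSON API for the archive listing, posts, and comment trees.
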
import logging
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.DEBUG)

if __name__ == '__main__':
    import sys
    sys.path.insert(0, '.')

import requests
from datetime import datetime
from utils import clean

WEBWORM_DOMAIN = "https://www.webworm.co"

API_STORIES = lambda x: f"{WEBWORM_DOMAIN}/api/v1/archive?sort=new&search=&offset=0&limit=100"
#API_ITEM = lambda x: f'https://hn.algolia.com/api/v1/items/{x}'
API_ITEM_COMMENTS = lambda x: f"{WEBWORM_DOMAIN}/api/v1/post/{x}/comments?all_comments=true&sort=best_first"

SITE_LINK = lambda x: f"{WEBWORM_DOMAIN}/p/{x}"
SITE_AUTHOR_LINK = lambda x: f"{WEBWORM_DOMAIN}/people/{x}"
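
# GET a JSON API route, retrying once with a longer timeout before giving up.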
def api(route, ref=None):
    try:
        r = requests.get(route(ref), timeout=5)
        if r.status_code != 200:
            raise Exception('Bad response code ' + str(r.status_code))
        return r.json()
    except KeyboardInterrupt:
        raise
    except BaseException as e:
        logging.error('Problem hitting Substack API: {}, trying again'.format(str(e)))

    try:
        r = requests.get(route(ref), timeout=15)
        if r.status_code != 200:
            raise Exception('Bad response code ' + str(r.status_code))
        return r.json()
    except KeyboardInterrupt:
        raise
    except BaseException as e:
        logging.error('Problem hitting Substack API: {}'.format(str(e)))
        return False
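
# List the newest story IDs from the archive, skipping subscriber-only posts.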
def feed():
    stories = api(API_STORIES) or []
    stories = [i for i in stories if i.get("audience") != "only_paid"]
    return [str(i.get("id")) for i in stories]
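
# Map a Substack byline object to an author name and profile link.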
def bylines(b):
    if 'id' not in b:
        return None
    a = {}
    a['name'] = b.get('name')
    a['link'] = SITE_AUTHOR_LINK(b.get('id'))
    return a
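
# Substack dates look like 2020-11-02T21:31:14.000Z; convert to a unix timestamp.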
def unix(date_str):
    return int(datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S.%fZ').timestamp())
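
# Build a nested comment dict from a Substack comment object. Expected fields,
# per the parsing below: 'body', 'date', 'name', 'reactions', 'children'.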
def comment(i):
    if 'body' not in i:
        return False

    c = {}
    c['date'] = unix(i.get('date'))
    c['author'] = i.get('name', '')
    # Substack keys reaction counts by emoji; '❤' holds the like count
    c['score'] = (i.get('reactions') or {}).get('❤')
    c['text'] = clean(i.get('body', '') or '')
    c['comments'] = [comment(j) for j in i.get('children') or []]
    c['comments'] = list(filter(bool, c['comments']))

    return c
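
# Recursively count comments with a non-empty author (presumably skipping deleted ones).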
def comment_count(i):
    alive = 1 if i['author'] else 0
    return sum([comment_count(c) for c in i['comments']]) + alive
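
# Fetch one story by ID: re-read the archive list, then pull its comment tree.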
def story(ref):
    stories = api(API_STORIES) or []
    stories = [i for i in stories if i.get("audience") != "only_paid"]
    stories = [i for i in stories if str(i.get('id')) == ref]

    if len(stories) == 0:
        print("no items")
        return False

    r = stories[0]
    if not r:
        print("not r")
        return False

    s = {}
    s['author'] = ''
    s['author_link'] = ''
    s['date'] = unix(r.get('post_date'))
    # Substack keys reaction counts by emoji; '❤' holds the like count
    s['score'] = (r.get('reactions') or {}).get('❤')
    s['title'] = r.get('title', '')
    s['link'] = r.get('canonical_url', '')
    s['url'] = r.get('canonical_url', '')

    comments = api(API_ITEM_COMMENTS, r.get('id')) or {}
    s['comments'] = [comment(i) for i in comments.get('comments') or []]
    s['comments'] = list(filter(bool, s['comments']))
    s['num_comments'] = r.get('comment_count', 0)

    authors = list(filter(None, [bylines(byline) for byline in r.get('publishedBylines') or []]))
    if len(authors):
        s['author'] = authors[0].get('name')
        s['author_link'] = authors[0].get('link')

    if 'text' in r and r['text']:
        s['text'] = clean(r['text'] or '')

    return s

# scratchpad so I can quickly develop the parser
if __name__ == '__main__':
    stories = feed()
    print(stories)
    print(story(stories[0]))