# qotnews/apiserver/feed.py
import logging
logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=logging.INFO)
import requests

from feeds import hackernews, reddit, tildes

# Local article-extraction (reader) service; get_article() POSTs a URL
# here and receives the extracted article text back.
READ_API = 'http://127.0.0.1:33843'
def list():
    """Return (ref, source) tuples for every story on all known feeds.

    NOTE(review): the name shadows the builtin list(); kept unchanged
    for compatibility with existing callers of feed.list().
    """
    feed = []
    feed += [(ref, 'hackernews') for ref in hackernews.feed()]
    feed += [(ref, 'reddit') for ref in reddit.feed()]
    feed += [(ref, 'tildes') for ref in tildes.feed()]
    return feed
def get_article(url):
    """Fetch the extracted text of *url* from the local reader service.

    Returns the response body on success, or '' on any failure —
    best-effort: callers treat a missing article as empty text.
    """
    try:
        r = requests.post(READ_API, data=dict(url=url), timeout=10)
        if r.status_code != 200:
            # Was a bare `raise` with no active exception, which only
            # produced a meaningless RuntimeError; raise something useful.
            raise Exception('Received status code {} from reader'.format(r.status_code))
        return r.text
    except Exception as e:
        # Catch Exception, not BaseException, so KeyboardInterrupt and
        # SystemExit still propagate instead of being swallowed here.
        logging.error('Problem getting article: %s', str(e))
        return ''
def update_story(story):
    """Fill in the full details of *story* in place.

    Dispatches on story['source'] to the matching feed module, merges
    the fetched fields into the dict, and — for link stories with no
    text yet — retrieves the article body via the reader service.
    Returns None; unknown sources are logged and skipped.
    """
    res = {}

    logging.info('Updating story %s', story['ref'])

    if story['source'] == 'hackernews':
        res = hackernews.story(story['ref'])
    elif story['source'] == 'reddit':
        res = reddit.story(story['ref'])
    elif story['source'] == 'tildes':
        res = tildes.story(story['ref'])
    else:
        # Previously a silent return; an unknown source is worth noting.
        logging.warning('Unknown story source: %s', story['source'])
        return

    if res:
        story.update(res)

    # Only fetch article text for link stories we haven't filled yet;
    # the reader service cannot handle PDFs.
    if story.get('url', '') and not story.get('text', ''):
        if not story['url'].endswith('.pdf'):
            logging.info('Getting article %s', story['url'])
            story['text'] = get_article(story['url'])
        else:
            story['text'] = '<p>Unsupported article type.</p>'