# qotnews/apiserver/feed.py
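#
# Builds the front-page feed and fetches readable article text for each story,
# trying archive.fo and Outline before falling back to the local reader server.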

import logging
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.DEBUG)
import requests
import time
from bs4 import BeautifulSoup
from feeds import hackernews, reddit, tildes, manual
OUTLINE_API = 'https://outlineapi.com/article'
ARCHIVE_API = 'https://archive.fo/submit/'
READ_API = 'http://127.0.0.1:33843'
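# Domains that don't scrape cleanly (hard paywalls); these get submitted to
# archive.fo before fetching. See get_article() below.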
ARCHIVE_FIRST = ['bloomberg.com', 'wsj.com']
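# Direct links to binary files, and domains with no readable article text,
# are skipped entirely.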
INVALID_FILES = ['.pdf', '.png', '.jpg', '.gif']
INVALID_DOMAINS = ['youtube.com']
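# Maximum story age; anything older is dropped by update_story().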
TWO_DAYS = 60*60*24*2
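# Return (ref, source) pairs for the stories currently on each front page.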
def list():
    feed = []
    feed += [(x, 'hackernews') for x in hackernews.feed()[:10]]
    feed += [(x, 'reddit') for x in reddit.feed()[:10]]
    feed += [(x, 'tildes') for x in tildes.feed()[:5]]
    return feed
def get_article(url):
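    # Hard-to-scrape domains (ARCHIVE_FIRST) get submitted to archive.fo first:
    # a 200 response means the snapshot isn't ready yet, so skip and retry on a
    # later update; a redirect means a snapshot exists, so fetch that instead.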
    if any([domain in url for domain in ARCHIVE_FIRST]):
        try:
            logging.info('Article from {}, archiving first...'.format(url))
            data = {'submitid': '9tjtS1EYe5wy8AJiYgVfH9P97uHU1IHG4lO67hsQpHOC3KKJrhqVIoQG2U7Rg%2Fpr', 'url': url}
            r = requests.post(ARCHIVE_API, data=data, timeout=20, allow_redirects=False)
            if r.status_code == 200:
                logging.info('Submitted for archiving. Skipping to wait...')
                return ''
            elif 'location' in r.headers:
                url = r.headers['location']
            else:
                raise Exception('Bad response code ' + str(r.status_code))
        except KeyboardInterrupt:
            raise
        except BaseException as e:
            logging.error('Problem archiving article: {}'.format(str(e)))
            return ''
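    # First, try Outline's reader API for clean article HTML.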
    try:
        params = {'source_url': url}
        headers = {'Referer': 'https://outline.com/'}
        r = requests.get(OUTLINE_API, params=params, headers=headers, timeout=20)
        if r.status_code == 429:
            logging.info('Rate limited by outline, sleeping 30s and skipping...')
            time.sleep(30)
            return ''
        if r.status_code != 200:
            raise Exception('Bad response code ' + str(r.status_code))
        html = r.json()['data']['html']
        if 'URL is not supported by Outline' in html:
            raise Exception('URL not supported by Outline')
        return html
    except KeyboardInterrupt:
        raise
    except BaseException as e:
        logging.error('Problem outlining article: {}'.format(str(e)))
        logging.info('Trying our server instead...')
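    # Finally, fall back to our local reader server (READ_API).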
    try:
        r = requests.post(READ_API, data=dict(url=url), timeout=10)
        if r.status_code != 200:
            raise Exception('Bad response code ' + str(r.status_code))
        return r.text
    except KeyboardInterrupt:
        raise
    except BaseException as e:
        logging.error('Problem getting article: {}'.format(str(e)))
        return ''
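# Pick a story thumbnail: the first <img> in the article text, skipped if the
# file is bigger than ~1 MB.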
def get_first_image(text):
    soup = BeautifulSoup(text, features='html.parser')

    try:
        first_img = soup.find('img')
        url = first_img['src']
        headers = {'User-Agent': 'Twitterbot/1.0'}
        length = requests.get(url, headers=headers).headers['Content-length']
        if int(length) > 1000000: raise Exception('Image too large')
        return url
    except:
        return ''
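# Fill in a story's details from its source, then fetch article text and a
# thumbnail if needed. Returns False if the story should be dropped.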
def update_story(story):
    res = {}

    logging.info('Updating story ' + str(story['ref']))

    if story['source'] == 'hackernews':
        res = hackernews.story(story['ref'])
    elif story['source'] == 'reddit':
        res = reddit.story(story['ref'])
    elif story['source'] == 'tildes':
        res = tildes.story(story['ref'])
    elif story['source'] == 'manual':
        res = manual.story(story['ref'])

    if res:
        story.update(res) # join dicts
    else:
        logging.info('Article not ready yet')
        return False

    if story['date'] and story['date'] + TWO_DAYS < time.time():
        logging.info('Article too old, removing')
        return False
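    # Only fetch article text for link stories that don't already have text
    # (e.g. not self posts), and skip URLs we know we can't extract from.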
    if story.get('url', '') and not story.get('text', ''):
        if any([story['url'].endswith(ext) for ext in INVALID_FILES]):
            logging.info('URL invalid file type')
            return False
        if any([domain in story['url'] for domain in INVALID_DOMAINS]):
            logging.info('URL invalid domain')
            return False

        logging.info('Getting article ' + story['url'])
        story['text'] = get_article(story['url'])
        if not story['text']: return False

        story['img'] = get_first_image(story['text'])

    return True
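# Quick manual test: fetch one article and extract its lead image.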
if __name__ == '__main__':
    #test_news_cache = {}
    #nid = 'jean'
    #ref = 20802050
    #source = 'hackernews'
    #test_news_cache[nid] = dict(id=nid, ref=ref, source=source)
    #news_story = test_news_cache[nid]
    #update_story(news_story)

    #print(get_article('https://www.bloomberg.com/news/articles/2019-09-23/xi-s-communists-under-pressure-as-high-prices-hit-china-workers'))

    a = get_article('https://blog.joinmastodon.org/2019/10/mastodon-3.0/')
    print(a)

    u = get_first_image(a)
    print(u)

    print('done')