import logging
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.DEBUG)

import requests
import time
from bs4 import BeautifulSoup

import settings
from feeds import hackernews, reddit, tildes, substack, manual, news
from scrapers import outline, declutter, local

ONE_HOUR = 60*60
ONE_DAY = 24*ONE_HOUR

INVALID_DOMAINS = ['youtube.com', 'bloomberg.com', 'wsj.com']
MAX_AGE_IN_DAYS = 3*ONE_DAY  # cutoff in seconds (three days), compared against time.time()

substacks = {}
for key, value in settings.SUBSTACK.items():
    substacks[key] = substack.Publication(value['url'])

categories = {}
for key, value in settings.CATEGORY.items():
    categories[key] = news.Category(value['url'], value.get('tz'))

sitemaps = {}
for key, value in settings.SITEMAP.items():
    sitemaps[key] = news.Sitemap(value['url'], value.get('tz'))
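
# The loops above assume settings.py entries shaped roughly like the following
# (keys taken from how they are read in this file; names and URLs are hypothetical):
#   SUBSTACK = {'example': {'url': 'https://example.substack.com', 'count': 4}}
#   CATEGORY = {'example': {'url': 'https://example.com/news', 'count': 10,
#                           'excludes': ['sport'], 'tz': 'UTC'}}
#   SITEMAP  = {'example': {'url': 'https://example.com/sitemap.xml', 'count': 10}}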


def list():
    '''Aggregate story refs from every configured source as (ref, source) tuples.'''
    feed = []
    if settings.NUM_HACKERNEWS:
        feed += [(x, 'hackernews') for x in hackernews.feed()[:settings.NUM_HACKERNEWS]]

    if settings.NUM_REDDIT:
        feed += [(x, 'reddit') for x in reddit.feed()[:settings.NUM_REDDIT]]

    if settings.NUM_TILDES:
        feed += [(x, 'tildes') for x in tildes.feed()[:settings.NUM_TILDES]]

    if settings.NUM_SUBSTACK:
        feed += [(x, 'substack') for x in substack.top.feed()[:settings.NUM_SUBSTACK]]

    for key, publication in substacks.items():
        count = settings.SUBSTACK[key]['count']
        feed += [(x, key) for x in publication.feed()[:count]]

    for key, sites in categories.items():
        count = settings.CATEGORY[key].get('count') or 0
        excludes = settings.CATEGORY[key].get('excludes')
        tz = settings.CATEGORY[key].get('tz')  # unused here; tz is already passed to news.Category above
        feed += [(x, key) for x in sites.feed(excludes)[:count]]

    for key, sites in sitemaps.items():
        count = settings.SITEMAP[key].get('count') or 0
        excludes = settings.SITEMAP[key].get('excludes')
        feed += [(x, key) for x in sites.feed(excludes)[:count]]

    return feed
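
# A caller of list() gets back lightweight (ref, source) pairs; a hypothetical
# sketch of turning them into full stories with update_story() below:
#   for ref, source in list():
#       story = dict(ref=ref, source=source, date=0, url='', text='')
#       if update_story(story):
#           ...  # story now carries the fields returned by its source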


def get_article(url):
    '''Fetch readable article HTML for url, trying each configured scraper in turn.'''
    scrapers = {
        'declutter': declutter,
        'outline': outline,
        'local': local,
    }
    available = settings.SCRAPERS or ['local']
    if 'local' not in available:
        available += ['local']

    for scraper in available:
        if scraper not in scrapers.keys():
            continue
        try:
            html = scrapers[scraper].get_html(url)
            if html:
                return html
        except KeyboardInterrupt:
            raise
        except:
            pass
    return ''
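
# Each scraper named above is assumed to be a module in the scrapers package
# exposing get_html(url) -> str, returning a falsy value when it cannot fetch
# or clean the page, which is why get_article() simply tries the next one.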


def get_content_type(url):
    '''Return the Content-Type header for url, or '' if it cannot be determined.'''
    try:
        # first attempt: Googlebot user agent with a Google crawler IP in X-Forwarded-For
        headers = {
            'User-Agent': 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
            'X-Forwarded-For': '66.249.66.1',
        }
        return requests.get(url, headers=headers, timeout=5).headers['content-type']
    except:
        pass

    try:
        # fallback: desktop browser user agent with a longer timeout
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0'}
        return requests.get(url, headers=headers, timeout=10).headers['content-type']
    except:
        return ''


def update_story(story, is_manual=False):
    '''Fill story in from its source and scrape its article text.

    Returns False when the story is not ready yet or should be dropped
    (too old, non-text content, blocked domain, or no article text).
    '''
    res = {}

    if story['source'] == 'hackernews':
        res = hackernews.story(story['ref'])
    elif story['source'] == 'reddit':
        res = reddit.story(story['ref'])
    elif story['source'] == 'tildes':
        res = tildes.story(story['ref'])
    elif story['source'] == 'substack':
        res = substack.top.story(story['ref'])
    elif story['source'] in categories.keys():
        res = categories[story['source']].story(story['ref'])
    elif story['source'] in sitemaps.keys():
        res = sitemaps[story['source']].story(story['ref'])
    elif story['source'] in substacks.keys():
        res = substacks[story['source']].story(story['ref'])
    elif story['source'] == 'manual':
        res = manual.story(story['ref'])

    if res:
        story.update(res) # join dicts
    else:
        logging.info('Story not ready yet')
        return False

    if story['date'] and not is_manual and story['date'] + MAX_AGE_IN_DAYS < time.time():
        logging.info('Story too old, removing')
        return False

    if story.get('url', '') and not story.get('text', ''):
        if not get_content_type(story['url']).startswith('text/'):
            logging.info('URL invalid file type / content type:')
            logging.info(story['url'])
            return False

        if any(domain in story['url'] for domain in INVALID_DOMAINS):
            logging.info('URL invalid domain:')
            logging.info(story['url'])
            return False

        logging.info('Getting article ' + story['url'])
        story['text'] = get_article(story['url'])
        if not story['text']:
            return False

    return True
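
# Illustrative shape of the dict update_story() expects (values taken from the
# commented-out test below; other fields are filled in or validated by the call):
#   story = dict(source='hackernews', ref=20802050, date=0, url='', text='')
#   update_story(story)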


if __name__ == '__main__':
    #test_news_cache = {}
    #nid = 'jean'
    #ref = 20802050
    #source = 'hackernews'
    #test_news_cache[nid] = dict(id=nid, ref=ref, source=source)
    #news_story = test_news_cache[nid]
    #update_story(news_story)

    #print(get_article('https://www.bloomberg.com/news/articles/2019-09-23/xi-s-communists-under-pressure-as-high-prices-hit-china-workers'))

    a = get_article('https://blog.joinmastodon.org/2019/10/mastodon-3.0/')
    print(a)

    print('done')