qotnews/apiserver/feeds/tildes.py

import logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
if __name__ == '__main__':
import sys
    sys.path.insert(0, '.')
import requests
from bs4 import BeautifulSoup
from datetime import datetime, timezone
from utils import clean
# cache the topic groups to prevent redirects
group_lookup = {}
USER_AGENT = 'qotnews scraper (github:tannercollin)'
API_TOPSTORIES = lambda: 'https://tildes.net'
API_ITEM = lambda x: 'https://tildes.net/shortener/{}'.format(x)
SITE_LINK = lambda group, ref: 'https://tildes.net/{}/{}'.format(group, ref)
SITE_AUTHOR_LINK = lambda x: 'https://tildes.net/user/{}'.format(x)

def api(route):
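    """Fetch a Tildes page and return its HTML, or False on any failure."""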
try:
headers = {'User-Agent': USER_AGENT}
r = requests.get(route, headers=headers, timeout=5)
if r.status_code != 200:
raise Exception('Bad response code ' + str(r.status_code))
return r.text
except KeyboardInterrupt:
raise
except BaseException as e:
logging.critical('Problem hitting tildes website: {}'.format(str(e)))
return False

def feed():
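    """Return the refs of the stories currently on the Tildes front page."""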
html = api(API_TOPSTORIES())
if not html: return []
soup = BeautifulSoup(html, features='html.parser')
    # story refs are the short ids embedded in each <article id="topic-...">
    listing = soup.find('ol', class_='topic-listing')
    if not listing:
        return []
    return [x['id'].split('-')[1] for x in listing.findAll('article')]
def unix(date_str):
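    """Convert a Tildes ISO-8601 UTC timestamp into a unix epoch integer."""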
    # the trailing 'Z' means UTC; pin the timezone so .timestamp() doesn't
    # treat the parsed datetime as local time
    return int(datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%SZ')
               .replace(tzinfo=timezone.utc).timestamp())

def comment(i):
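    """Parse one <li> of a comment tree into a nested comment dict."""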
i = i.article
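    # skip comments that Tildes has marked as removed or deleted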
if i.find('div', class_='is-comment-removed'):
return False
if i.find('div', class_='is-comment-deleted'):
return False
c = {}
lu = i.find('a', class_='link-user')
c['author'] = str(lu.string if lu else 'unknown user')
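    # comment scores aren't parsed from the markup; default every comment to 1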
c['score'] = 1
c['date'] = unix(i.find('time')['datetime'])
c['text'] = clean(i.find('div', class_='comment-text').encode_contents().decode() or '')
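    # recurse into the nested reply tree, one level of <li> at a time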
ct = i.find('ol', class_='comment-tree')
c['comments'] = [comment(j) for j in ct.findAll('li', recursive=False)] if ct else []
c['comments'] = list(filter(bool, c['comments']))
return c

def story(ref):
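    """Fetch one Tildes story by ref and parse it into a story dict.

    Returns False if the story can't be fetched or gets filtered out.
    """
    # use the cached group to hit the canonical URL directly, otherwise
    # go through the /shortener/ redirect endpoint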
if ref in group_lookup:
html = api(SITE_LINK(group_lookup[ref], ref))
else:
html = api(API_ITEM(ref))
if not html:
logging.info('Bad Tildes API response.')
return False
soup = BeautifulSoup(html, features='html.parser')
a = soup.find('article', class_='topic-full')
if a is None:
logging.info('Tildes <article> element not found.')
return False
h = a.find('header')
lu = h.find('a', class_='link-user')
error = a.find('div', class_='text-error')
    if error:
        # .string is None for mixed content, so flatten the element's text first
        error_text = error.get_text()
        if 'deleted' in error_text or 'removed' in error_text:
            logging.info('Article was deleted or removed.')
            return False
s = {}
s['author'] = str(lu.string if lu else 'unknown user')
s['author_link'] = SITE_AUTHOR_LINK(s['author'])
s['score'] = int(h.find('span', class_='topic-voting-votes').string)
s['date'] = unix(h.find('time')['datetime'])
s['title'] = str(h.h1.string)
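    # the group name comes from the site header breadcrumb; cache it so
    # future fetches of this ref can skip the shortener redirect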
s['group'] = str(soup.find('div', class_='site-header-context').a.string)
group_lookup[ref] = s['group']
s['link'] = SITE_LINK(s['group'], ref)
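    # link posts carry their external URL in topic-full-link;
    # self posts fall back to the story's own Tildes URL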
ud = a.find('div', class_='topic-full-link')
s['url'] = ud.a['href'] if ud else s['link']
sc = a.find('ol', id='comments')
s['comments'] = [comment(i) for i in sc.findAll('li', recursive=False)]
s['comments'] = list(filter(bool, s['comments']))
ch = a.find('header', class_='topic-comments-header')
s['num_comments'] = int(ch.h2.string.split(' ')[0]) if ch else 0
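    # only surface stories from a whitelist of on-topic groups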
if s['group'].split('.')[0] not in [
'~arts',
'~comp',
'~creative',
'~design',
'~engineering',
'~finance',
'~science',
'~tech',
]:
logging.info('Group ({}) not in whitelist.'.format(s['group']))
return False
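    # drop stories that haven't attracted enough votes or discussion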
if s['score'] < 15 and s['num_comments'] < 10:
logging.info('Score ({}) or num comments ({}) below threshold.'.format(s['score'], s['num_comments']))
return False
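    # self posts carry their body in topic-full-text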
td = a.find('div', class_='topic-full-text')
if td:
s['text'] = clean(td.encode_contents().decode() or '')
return s

# scratchpad so I can quickly develop the parser
if __name__ == '__main__':
print(feed())
#normal = story('gxt')
#print(normal)
#no_comments = story('gxr')
#print(no_comments)
#self_post = story('gsb')
#print(self_post)
#li_comment = story('gqx')
#print(li_comment)
#broken = story('q4y')
#print(broken)
# make sure there's no self-reference
#import copy
#for x in [normal, no_comments, self_post, li_comment]:
# _ = copy.deepcopy(x)