Compare commits


55 Commits

SHA1 Message Date
9ec61ea5bc Ignore dead and political stories 2025-05-27 18:47:17 +00:00
bdc7a6c10d Fix Better HN api content extraction 2025-02-01 22:39:13 +00:00
4858516b01 Add Better HN as an API backup 2025-02-01 21:42:06 +00:00
f10e6063fc Bug fixes 2025-02-01 20:31:35 +00:00
249a616531 Alert on story update error 2024-03-16 20:41:24 +00:00
ab92bd5441 Adjust score and comment thresholds 2024-03-08 03:08:18 +00:00
6b16a768a7 Fix deletion script 2024-03-08 03:08:03 +00:00
57de076fec Increase database timeout 2024-02-27 18:48:56 +00:00
074b898508 Fix lobsters comment parsing 2024-02-27 18:47:00 +00:00
f049d194ab Move scripts into own folder 2024-02-27 18:32:29 +00:00
c2b9a1cb7a Update readability 2024-02-27 18:32:19 +00:00
4435f49e17 Make "dark" theme grey, add "black" theme 2023-09-13 01:19:47 +00:00
494d89ac30 Disable lobsters 2023-09-13 01:02:15 +00:00
e79fca6ecc Replace "indent_level" with "depth" in lobsters API (see: fe09e5aa31) 2023-08-31 07:35:44 +00:00
c65fb69092 Handle Lobsters comment parsing TypeErrors
Too lazy to debug this:

2023-08-29 12:56:35,111 - root - INFO - Updating lobsters story: yktkwr, index: 55
Traceback (most recent call last):
  File "src/gevent/greenlet.py", line 854, in gevent._gevent_cgreenlet.Greenlet.run
  File "/home/tanner/qotnews/apiserver/server.py", line 194, in feed_thread
    valid = feed.update_story(story)
  File "/home/tanner/qotnews/apiserver/feed.py", line 74, in update_story
    res = lobsters.story(story['ref'])
  File "/home/tanner/qotnews/apiserver/feeds/lobsters.py", line 103, in story
    s['comments'] = iter_comments(r['comments'])
  File "/home/tanner/qotnews/apiserver/feeds/lobsters.py", line 76, in iter_comments
    parent_stack = parent_stack[:indent-1]
TypeError: unsupported operand type(s) for -: 'NoneType' and 'int'
2023-08-29T12:56:35Z <Greenlet at 0x7f92ad840ae0: feed_thread> failed with TypeError
2023-08-31 07:30:39 +00:00
632d028e4c Add Tildes group whitelist 2023-07-13 22:54:36 +00:00
ea8e9e5a23 Increase again 2023-06-13 17:11:50 +00:00
2838ea9b41 Increase Tildes story score requirement 2023-06-11 01:01:31 +00:00
f15d108971 Catch all possible Reddit API exceptions 2023-03-15 21:16:37 +00:00
f777348af8 Fix darkmode fullscreen button color 2022-08-11 19:36:36 +00:00
486404a413 Fix fix-stories bug 2022-08-10 04:06:39 +00:00
7c9c07a4cf Hide fullscreen button if it's not available 2022-08-10 04:05:25 +00:00
08d02f6013 Add fullscreen mode 2022-08-08 23:21:49 +00:00
1b54342702 Add red theme 2022-08-08 20:14:57 +00:00
9e9571a3c0 Write fixed stories to database 2022-07-05 00:57:56 +00:00
dc83a70887 Begin script to fix bad gzip text 2022-07-04 20:32:01 +00:00
2e2c9ae837 Move FEED_LENGTH to settings.py, use for search results 2022-07-04 19:08:24 +00:00
61021d8f91 Small UI changes 2022-07-04 19:08:24 +00:00
e65047fead Add accept gzip header to readability server 2022-07-04 19:07:31 +00:00
8e775c189f Add test file 2022-07-04 05:56:06 +00:00
3d9274309a Fix requests text encoding slowness 2022-07-04 05:55:52 +00:00
7bdbbf10b2 Return search results directly from the server 2022-07-04 04:33:01 +00:00
6aa0f78536 Remove Article / Comments, etc thing after name 2022-07-04 04:33:01 +00:00
bf3663bbec Remove hard-coded title 2022-06-30 00:12:22 +00:00
e6589dc61c Adjust title 2022-06-30 00:05:15 +00:00
307e8349f3 Change header based on page 2022-06-30 00:00:30 +00:00
04cd56daa8 Add index / noindex to client 2022-06-29 23:30:39 +00:00
c80769def6 Add noindex meta tag to stories 2022-06-29 23:20:53 +00:00
ebd1ad2140 Increase database timeout 2022-06-24 20:50:27 +00:00
2cc7dd0d6d Update software 2022-05-31 04:24:12 +00:00
6e7cb86d2e Explain no javascript 2022-05-31 04:23:52 +00:00
a25457254f Improve logging, sends tweets to nitter.net 2022-03-05 23:48:46 +00:00
a693ea5342 Remove outline API 2022-03-05 22:05:29 +00:00
7386e1d8b0 Include option to disable readerserver 2022-03-05 22:04:25 +00:00
f8e8597e3a Include option to disable search 2022-03-05 21:58:35 +00:00
55c282ee69 Fix search to work with low-RAM server 2022-03-05 21:33:07 +00:00
3f774a9e38 Improve logging 2021-09-06 00:21:05 +00:00
dcedd4caa1 Add script to reindex search, abstract search API 2021-09-06 00:20:21 +00:00
7a131ebd03 Change the order by which content-type is grabbed 2021-01-30 06:36:02 +00:00
6f64401785 Add optional skip and limit to API route 2021-01-18 03:59:33 +00:00
3ff917e806 Remove colons from date string so Python 3.5 can parse 2020-12-15 23:19:50 +00:00
c9fb9bd5df Add Lobsters to feed 2020-12-12 05:26:33 +00:00
fd9c9c888d Update gitignore 2020-12-11 23:49:45 +00:00
42dcf15374 Increase sqlite lock timeout 2020-11-19 21:38:18 +00:00
d8a0b77765 Blacklist sec.gov website 2020-11-19 21:37:59 +00:00
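
The c65fb69092 traceback above comes from `iter_comments` slicing `parent_stack[:indent-1]` when a comment's nesting field arrives as `None`. A minimal sketch of the defensive coercion that would have avoided the crash — `safe_depth` is a hypothetical helper, not code from this repo (the actual fix in e79fca6ecc was to read the API's `depth` key instead of `indent_level`):

```
def safe_depth(comment):
    # The Lobsters API sometimes omits or nulls the nesting field;
    # coerce anything non-integer to 0 so list slicing never sees None.
    depth = comment.get('depth')
    return depth if isinstance(depth, int) else 0
```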
34 changed files with 1153 additions and 505 deletions

View File

@@ -109,4 +109,5 @@ settings.py
 data.db
 data.db.bak
 data/archive/*
+data/backup/*
 qotnews.sqlite

View File

@@ -5,7 +5,7 @@ from sqlalchemy.ext.declarative import declarative_base
 from sqlalchemy.orm import sessionmaker
 from sqlalchemy.exc import IntegrityError
 
-engine = create_engine('sqlite:///data/qotnews.sqlite')
+engine = create_engine('sqlite:///data/qotnews.sqlite', connect_args={'timeout': 360})
 Session = sessionmaker(bind=engine)
 Base = declarative_base()
@@ -68,12 +68,13 @@ def get_reflist(amount):
     q = session.query(Reflist).order_by(Reflist.rid.desc()).limit(amount)
     return [dict(ref=x.ref, sid=x.sid, source=x.source) for x in q.all()]
 
-def get_stories(amount):
+def get_stories(amount, skip=0):
     session = Session()
     q = session.query(Reflist, Story.meta_json).\
         order_by(Reflist.rid.desc()).\
         join(Story).\
         filter(Story.title != None).\
+        offset(skip).\
         limit(amount)
     return [x[1] for x in q]
@@ -100,7 +101,22 @@ def del_ref(ref):
     finally:
         session.close()
 
+def count_stories():
+    try:
+        session = Session()
+        return session.query(Story).count()
+    finally:
+        session.close()
+
+def get_story_list():
+    try:
+        session = Session()
+        return session.query(Story.sid).all()
+    finally:
+        session.close()
+
 if __name__ == '__main__':
     init()
-    print(get_story_by_ref('hgi3sy'))
+    #print(get_story_by_ref('hgi3sy'))
+    print(len(get_reflist(99999)))
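
The new `skip` parameter makes `get_stories` an offset/limit pager over the reflist join. A minimal sketch of how a caller might walk pages (page size and loop bounds are illustrative, not from this repo):

```
page_size = 20
for page in range(3):
    # skip rows already seen; each call returns at most page_size stories
    stories = get_stories(page_size, skip=page * page_size)
    print('page', page, 'has', len(stories), 'stories')
```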

View File

@@ -8,12 +8,10 @@ import time
 from bs4 import BeautifulSoup
 
 import settings
-from feeds import hackernews, reddit, tildes, manual
+from feeds import hackernews, reddit, tildes, manual, lobsters
+import utils
 
-OUTLINE_API = 'https://api.outline.com/v3/parse_article'
-READ_API = 'http://127.0.0.1:33843'
-
-INVALID_DOMAINS = ['youtube.com', 'bloomberg.com', 'wsj.com']
+INVALID_DOMAINS = ['youtube.com', 'bloomberg.com', 'wsj.com', 'sec.gov']
 TWO_DAYS = 60*60*24*2
 
 def list():
@@ -21,6 +19,9 @@ def list():
     if settings.NUM_HACKERNEWS:
         feed += [(x, 'hackernews') for x in hackernews.feed()[:settings.NUM_HACKERNEWS]]
 
+    if settings.NUM_LOBSTERS:
+        feed += [(x, 'lobsters') for x in lobsters.feed()[:settings.NUM_LOBSTERS]]
+
     if settings.NUM_REDDIT:
         feed += [(x, 'reddit') for x in reddit.feed()[:settings.NUM_REDDIT]]
@@ -30,29 +31,16 @@ def list():
     return feed
 
 def get_article(url):
-    try:
-        params = {'source_url': url}
-        headers = {'Referer': 'https://outline.com/'}
-        r = requests.get(OUTLINE_API, params=params, headers=headers, timeout=20)
-        if r.status_code == 429:
-            logging.info('Rate limited by outline, sleeping 30s and skipping...')
-            time.sleep(30)
-            return ''
-        if r.status_code != 200:
-            raise Exception('Bad response code ' + str(r.status_code))
-        html = r.json()['data']['html']
-        if 'URL is not supported by Outline' in html:
-            raise Exception('URL not supported by Outline')
-        return html
-    except KeyboardInterrupt:
-        raise
-    except BaseException as e:
-        logging.error('Problem outlining article: {}'.format(str(e)))
-
-    logging.info('Trying our server instead...')
+    if not settings.READER_URL:
+        logging.info('Readerserver not configured, aborting.')
+        return ''
+
+    if url.startswith('https://twitter.com'):
+        logging.info('Replacing twitter.com url with nitter.net')
+        url = url.replace('twitter.com', 'nitter.net')
 
     try:
-        r = requests.post(READ_API, data=dict(url=url), timeout=20)
+        r = requests.post(settings.READER_URL, data=dict(url=url), timeout=20)
         if r.status_code != 200:
             raise Exception('Bad response code ' + str(r.status_code))
         return r.text
@@ -63,32 +51,39 @@ def get_article(url):
         return ''
 
 def get_content_type(url):
+    try:
+        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0'}
+        return requests.get(url, headers=headers, timeout=5).headers['content-type']
+    except:
+        return ''
+
     try:
         headers = {
             'User-Agent': 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
             'X-Forwarded-For': '66.249.66.1',
         }
-        return requests.get(url, headers=headers, timeout=5).headers['content-type']
-    except:
-        pass
-
-    try:
-        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0'}
         return requests.get(url, headers=headers, timeout=10).headers['content-type']
     except:
-        return ''
+        pass
 
 def update_story(story, is_manual=False):
     res = {}
 
-    if story['source'] == 'hackernews':
-        res = hackernews.story(story['ref'])
-    elif story['source'] == 'reddit':
-        res = reddit.story(story['ref'])
-    elif story['source'] == 'tildes':
-        res = tildes.story(story['ref'])
-    elif story['source'] == 'manual':
-        res = manual.story(story['ref'])
+    try:
+        if story['source'] == 'hackernews':
+            res = hackernews.story(story['ref'])
+        elif story['source'] == 'lobsters':
+            res = lobsters.story(story['ref'])
+        elif story['source'] == 'reddit':
+            res = reddit.story(story['ref'])
+        elif story['source'] == 'tildes':
+            res = tildes.story(story['ref'])
+        elif story['source'] == 'manual':
+            res = manual.story(story['ref'])
+    except BaseException as e:
+        utils.alert_tanner('Problem updating {} story, ref {}: {}'.format(story['source'], story['ref'], str(e)))
+        logging.exception(e)
+        return False
 
     if res:
         story.update(res) # join dicts
@@ -97,7 +92,7 @@ def update_story(story, is_manual=False):
         return False
 
     if story['date'] and not is_manual and story['date'] + TWO_DAYS < time.time():
-        logging.info('Story too old, removing')
+        logging.info('Story too old, removing. Date: {}'.format(story['date']))
         return False
 
     if story.get('url', '') and not story.get('text', ''):
@@ -111,6 +106,12 @@ def update_story(story, is_manual=False):
         logging.info(story['url'])
         return False
 
+    if 'trump' in story['title'].lower() or 'musk' in story['title'].lower():
+        logging.info('Trump / Musk story, skipping')
+        logging.info(story['url'])
+        return False
+
     logging.info('Getting article ' + story['url'])
     story['text'] = get_article(story['url'])
     if not story['text']: return False
@@ -128,7 +129,7 @@ if __name__ == '__main__':
     #print(get_article('https://www.bloomberg.com/news/articles/2019-09-23/xi-s-communists-under-pressure-as-high-prices-hit-china-workers'))
-    a = get_article('https://blog.joinmastodon.org/2019/10/mastodon-3.0/')
+    a = get_content_type('https://tefkos.comminfo.rutgers.edu/Courses/e530/Readings/Beal%202008%20full%20text%20searching.pdf')
     print(a)
     print('done')
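
`update_story` mutates the dict it is given: it merges the source-specific result in with `story.update(res)` and then attaches the extracted article as `story['text']`. A sketch of driving it by hand, using the same skeleton `feed_thread` builds in server.py (the ref value is illustrative):

```
story = dict(id='ABCD', ref='8863', source='hackernews')
if update_story(story):
    print(story['title'], '-', len(story.get('text', '')), 'chars extracted')
```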

View File

@@ -12,7 +12,8 @@ import requests
 from utils import clean
 
 API_TOPSTORIES = lambda x: 'https://hacker-news.firebaseio.com/v0/topstories.json'
-API_ITEM = lambda x : 'https://hn.algolia.com/api/v1/items/{}'.format(x)
+ALG_API_ITEM = lambda x : 'https://hn.algolia.com/api/v1/items/{}'.format(x)
+BHN_API_ITEM = lambda x : 'https://api.hnpwa.com/v0/item/{}.json'.format(x)
 SITE_LINK = lambda x : 'https://news.ycombinator.com/item?id={}'.format(x)
 SITE_AUTHOR_LINK = lambda x : 'https://news.ycombinator.com/user?id={}'.format(x)
@@ -42,7 +43,7 @@ def api(route, ref=None):
 def feed():
     return [str(x) for x in api(API_TOPSTORIES) or []]
 
-def comment(i):
+def alg_comment(i):
     if 'author' not in i:
         return False
@@ -51,21 +52,25 @@ def comment(i):
     c['score'] = i.get('points', 0)
     c['date'] = i.get('created_at_i', 0)
     c['text'] = clean(i.get('text', '') or '')
-    c['comments'] = [comment(j) for j in i['children']]
+    c['comments'] = [alg_comment(j) for j in i['children']]
     c['comments'] = list(filter(bool, c['comments']))
     return c
 
-def comment_count(i):
+def alg_comment_count(i):
     alive = 1 if i['author'] else 0
-    return sum([comment_count(c) for c in i['comments']]) + alive
+    return sum([alg_comment_count(c) for c in i['comments']]) + alive
 
-def story(ref):
-    r = api(API_ITEM, ref)
-    if not r: return False
+def alg_story(ref):
+    r = api(ALG_API_ITEM, ref)
+    if not r:
+        logging.info('Bad Algolia Hackernews API response.')
+        return None
 
     if 'deleted' in r:
+        logging.info('Story was deleted.')
         return False
     elif r.get('type', '') != 'story':
+        logging.info('Type "{}" is not "story".'.format(r.get('type', '')))
         return False
 
     s = {}
@@ -76,17 +81,85 @@ def story(ref):
     s['title'] = r.get('title', '')
     s['link'] = SITE_LINK(ref)
     s['url'] = r.get('url', '')
-    s['comments'] = [comment(i) for i in r['children']]
+    s['comments'] = [alg_comment(i) for i in r['children']]
     s['comments'] = list(filter(bool, s['comments']))
-    s['num_comments'] = comment_count(s) - 1
+    s['num_comments'] = alg_comment_count(s) - 1
 
     if 'text' in r and r['text']:
         s['text'] = clean(r['text'] or '')
 
     return s
 
+def bhn_comment(i):
+    if 'user' not in i:
+        return False
+
+    c = {}
+    c['author'] = i.get('user', '')
+    c['score'] = 0 # Not present?
+    c['date'] = i.get('time', 0)
+    c['text'] = clean(i.get('content', '') or '')
+    c['comments'] = [bhn_comment(j) for j in i['comments']]
+    c['comments'] = list(filter(bool, c['comments']))
+    return c
+
+def bhn_story(ref):
+    r = api(BHN_API_ITEM, ref)
+    if not r:
+        logging.info('Bad BetterHN Hackernews API response.')
+        return None
+
+    if 'deleted' in r: # TODO: verify
+        logging.info('Story was deleted.')
+        return False
+    elif r.get('dead', False):
+        logging.info('Story was deleted.')
+        return False
+    elif r.get('type', '') != 'link':
+        logging.info('Type "{}" is not "link".'.format(r.get('type', '')))
+        return False
+
+    s = {}
+    s['author'] = r.get('user', '')
+    s['author_link'] = SITE_AUTHOR_LINK(r.get('user', ''))
+    s['score'] = r.get('points', 0)
+    s['date'] = r.get('time', 0)
+    s['title'] = r.get('title', '')
+    s['link'] = SITE_LINK(ref)
+    s['url'] = r.get('url', '')
+    if s['url'].startswith('item'):
+        s['url'] = SITE_LINK(ref)
+    s['comments'] = [bhn_comment(i) for i in r['comments']]
+    s['comments'] = list(filter(bool, s['comments']))
+    s['num_comments'] = r.get('comments_count', 0)
+
+    if 'content' in r and r['content']:
+        s['text'] = clean(r['content'] or '')
+
+    return s
+
+def story(ref):
+    s = alg_story(ref)
+    if s is None:
+        s = bhn_story(ref)
+    if not s:
+        return False
+
+    if s['score'] < 25 and s['num_comments'] < 10:
+        logging.info('Score ({}) or num comments ({}) below threshold.'.format(s['score'], s['num_comments']))
+        return False
+
+    return s
+
 # scratchpad so I can quickly develop the parser
 if __name__ == '__main__':
     print(feed())
     #print(story(20763961))
     #print(story(20802050))
+    #print(story(42899834)) # type "job"
+    #print(story(42900076)) # Ask HN
+    #print(story(42898201)) # Show HN
+    #print(story(42899703)) # normal
+    print(story(42902678)) # bad title?
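
The fallback hinges on `alg_story` and `bhn_story` returning two distinct falsy values: `None` means the API call itself failed (so the BetterHN mirror is worth trying), while `False` means the story is genuinely unusable. A compressed restatement of that tri-state, with a hypothetical helper name:

```
def story_with_fallback(ref):
    # None = the Algolia call failed (retryable via the mirror);
    # False = the story itself is invalid (deleted, wrong type, etc.)
    s = alg_story(ref)
    if s is None:
        s = bhn_story(ref)
    return s or False
```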

apiserver/feeds/lobsters.py (new file)
View File

@@ -0,0 +1,120 @@
import logging
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.DEBUG)

if __name__ == '__main__':
    import sys
    sys.path.insert(0,'.')

import requests
from datetime import datetime

from utils import clean

API_HOTTEST = lambda x: 'https://lobste.rs/hottest.json'
API_ITEM = lambda x : 'https://lobste.rs/s/{}.json'.format(x)

SITE_LINK = lambda x : 'https://lobste.rs/s/{}'.format(x)
SITE_AUTHOR_LINK = lambda x : 'https://lobste.rs/u/{}'.format(x)

def api(route, ref=None):
    try:
        r = requests.get(route(ref), timeout=5)
        if r.status_code != 200:
            raise Exception('Bad response code ' + str(r.status_code))
        return r.json()
    except KeyboardInterrupt:
        raise
    except BaseException as e:
        logging.error('Problem hitting lobsters API: {}, trying again'.format(str(e)))

        try:
            r = requests.get(route(ref), timeout=15)
            if r.status_code != 200:
                raise Exception('Bad response code ' + str(r.status_code))
            return r.json()
        except KeyboardInterrupt:
            raise
        except BaseException as e:
            logging.error('Problem hitting lobsters API: {}'.format(str(e)))
            return False

def feed():
    return [x['short_id'] for x in api(API_HOTTEST) or []]

def unix(date_str):
    date_str = date_str.replace(':', '')
    return int(datetime.strptime(date_str, '%Y-%m-%dT%H%M%S.%f%z').timestamp())

def make_comment(i):
    c = {}
    try:
        c['author'] = i['commenting_user']
    except KeyError:
        c['author'] = ''
    c['score'] = i.get('score', 0)
    try:
        c['date'] = unix(i['created_at'])
    except KeyError:
        c['date'] = 0
    c['text'] = clean(i.get('comment', '') or '')
    c['comments'] = []
    return c

def iter_comments(flat_comments):
    nested_comments = []
    parent_stack = []

    for comment in flat_comments:
        c = make_comment(comment)
        indent = comment['depth']

        if indent == 0:
            nested_comments.append(c)
            parent_stack = [c]
        else:
            parent_stack = parent_stack[:indent]
            p = parent_stack[-1]
            p['comments'].append(c)
            parent_stack.append(c)

    return nested_comments

def story(ref):
    r = api(API_ITEM, ref)
    if not r:
        logging.info('Bad Lobsters API response.')
        return False

    s = {}
    try:
        s['author'] = r['submitter_user']
        s['author_link'] = SITE_AUTHOR_LINK(s['author'])
    except KeyError:
        s['author'] = ''
        s['author_link'] = ''
    s['score'] = r.get('score', 0)
    try:
        s['date'] = unix(r['created_at'])
    except KeyError:
        s['date'] = 0
    s['title'] = r.get('title', '')
    s['link'] = SITE_LINK(ref)
    s['url'] = r.get('url', '')
    s['comments'] = iter_comments(r['comments'])
    s['num_comments'] = r['comment_count']

    if s['score'] < 15 and s['num_comments'] < 10:
        logging.info('Score ({}) or num comments ({}) below threshold.'.format(s['score'], s['num_comments']))
        return False

    if 'description' in r and r['description']:
        s['text'] = clean(r['description'] or '')

    return s

# scratchpad so I can quickly develop the parser
if __name__ == '__main__':
    #print(feed())
    import json
    print(json.dumps(story('fzvd1v'), indent=4))
    #print(json.dumps(story('ixyv5u'), indent=4))
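
`iter_comments` rebuilds the tree from Lobsters' flat, depth-annotated list by truncating `parent_stack` to the current depth and appending under the new top. A worked example with hypothetical payload data:

```
flat = [
    {'commenting_user': 'a', 'comment': 'root',   'depth': 0, 'created_at': '2023-08-29T12:00:00.000+00:00'},
    {'commenting_user': 'b', 'comment': 'reply',  'depth': 1, 'created_at': '2023-08-29T12:01:00.000+00:00'},
    {'commenting_user': 'c', 'comment': 'reply',  'depth': 1, 'created_at': '2023-08-29T12:02:00.000+00:00'},
    {'commenting_user': 'd', 'comment': 'nested', 'depth': 2, 'created_at': '2023-08-29T12:03:00.000+00:00'},
]
nested = iter_comments(flat)
# 'd' (depth 2) hangs off 'c', the most recent depth-1 comment
assert nested[0]['comments'][1]['comments'][0]['author'] == 'd'
```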

View File

@@ -27,7 +27,9 @@ def api(route):
 
 def story(ref):
     html = api(ref)
-    if not html: return False
+    if not html:
+        logging.info('Bad http GET response.')
+        return False
 
     soup = BeautifulSoup(html, features='html.parser')

View File

@@ -32,11 +32,8 @@ def feed():
         return [x.id for x in reddit.subreddit(subs).hot()]
     except KeyboardInterrupt:
         raise
-    except PRAWException as e:
-        logging.error('Problem hitting reddit API: {}'.format(str(e)))
-        return []
-    except PrawcoreException as e:
-        logging.error('Problem hitting reddit API: {}'.format(str(e)))
+    except BaseException as e:
+        logging.critical('Problem hitting reddit API: {}'.format(str(e)))
         return []
 
 def comment(i):
@@ -59,7 +56,9 @@ def comment(i):
 def story(ref):
     try:
         r = reddit.submission(ref)
-        if not r: return False
+        if not r:
+            logging.info('Bad Reddit API response.')
+            return False
 
         s = {}
         s['author'] = r.author.name if r.author else '[Deleted]'
@@ -74,6 +73,7 @@ def story(ref):
         s['num_comments'] = r.num_comments
 
         if s['score'] < 25 and s['num_comments'] < 10:
+            logging.info('Score ({}) or num comments ({}) below threshold.'.format(s['score'], s['num_comments']))
            return False
 
         if r.selftext:
@@ -84,10 +84,10 @@ def story(ref):
     except KeyboardInterrupt:
         raise
     except PRAWException as e:
-        logging.error('Problem hitting reddit API: {}'.format(str(e)))
+        logging.critical('Problem hitting reddit API: {}'.format(str(e)))
         return False
     except PrawcoreException as e:
-        logging.error('Problem hitting reddit API: {}'.format(str(e)))
+        logging.critical('Problem hitting reddit API: {}'.format(str(e)))
         return False
 
 # scratchpad so I can quickly develop the parser

View File

@@ -34,7 +34,7 @@ def api(route):
     except KeyboardInterrupt:
         raise
     except BaseException as e:
-        logging.error('Problem hitting tildes website: {}'.format(str(e)))
+        logging.critical('Problem hitting tildes website: {}'.format(str(e)))
         return False
 
 def feed():
@@ -71,11 +71,15 @@ def story(ref):
         html = api(SITE_LINK(group_lookup[ref], ref))
     else:
         html = api(API_ITEM(ref))
-    if not html: return False
+    if not html:
+        logging.info('Bad Tildes API response.')
+        return False
 
     soup = BeautifulSoup(html, features='html.parser')
     a = soup.find('article', class_='topic-full')
-    if a is None: return False
+    if a is None:
+        logging.info('Tildes <article> element not found.')
+        return False
 
     h = a.find('header')
     lu = h.find('a', class_='link-user')
@@ -83,6 +87,7 @@ def story(ref):
     error = a.find('div', class_='text-error')
     if error:
         if 'deleted' in error.string or 'removed' in error.string:
+            logging.info('Article was deleted or removed.')
             return False
 
     s = {}
@@ -102,7 +107,21 @@ def story(ref):
     ch = a.find('header', class_='topic-comments-header')
     s['num_comments'] = int(ch.h2.string.split(' ')[0]) if ch else 0
 
-    if s['score'] < 8 and s['num_comments'] < 6:
+    if s['group'].split('.')[0] not in [
+            '~arts',
+            '~comp',
+            '~creative',
+            '~design',
+            '~engineering',
+            '~finance',
+            '~science',
+            '~tech',
+            ]:
+        logging.info('Group ({}) not in whitelist.'.format(s['group']))
+        return False
+
+    if s['score'] < 15 and s['num_comments'] < 10:
+        logging.info('Score ({}) or num comments ({}) below threshold.'.format(s['score'], s['num_comments']))
         return False
 
     td = a.find('div', class_='topic-full-text')
@@ -113,7 +132,7 @@ def story(ref):
 
 # scratchpad so I can quickly develop the parser
 if __name__ == '__main__':
-    #print(feed())
+    print(feed())
     #normal = story('gxt')
     #print(normal)
     #no_comments = story('gxr')
@@ -122,8 +141,8 @@ if __name__ == '__main__':
     #print(self_post)
     #li_comment = story('gqx')
     #print(li_comment)
-    broken = story('q4y')
-    print(broken)
+    #broken = story('q4y')
+    #print(broken)
 
     # make sure there's no self-reference
     #import copy
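
The `split('.')[0]` in the whitelist check normalizes subgroups to their parent before comparison; e.g. a story in a hypothetical `~tech.networking` subgroup is tested as `~tech`:

```
assert '~tech.networking'.split('.')[0] == '~tech'
```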

View File

@@ -1,6 +1,8 @@
import database import database
import search import search
import sys import sys
import settings
import logging
import json import json
import requests import requests
@@ -21,7 +23,7 @@ def database_del_story(sid):
def search_del_story(sid): def search_del_story(sid):
try: try:
r = requests.delete(search.MEILI_URL + 'indexes/qotnews/documents/'+sid, timeout=2) r = requests.delete(settings.MEILI_URL + 'indexes/qotnews/documents/'+sid, timeout=2)
if r.status_code != 202: if r.status_code != 202:
raise Exception('Bad response code ' + str(r.status_code)) raise Exception('Bad response code ' + str(r.status_code))
return r.json() return r.json()

View File

@@ -0,0 +1,58 @@
import time
import json
import logging

import feed
import database
import search

database.init()

def fix_gzip_bug(story_list):
    FIX_THRESHOLD = 150

    count = 1
    for sid in story_list:
        try:
            sid = sid[0]
            story = database.get_story(sid)
            full_json = json.loads(story.full_json)
            meta_json = json.loads(story.meta_json)
            text = full_json.get('text', '')

            count = text.count('�')
            if not count: continue

            ratio = count / len(text) * 1000
            print('Bad story:', sid, 'Num ?:', count, 'Ratio:', ratio)
            if ratio < FIX_THRESHOLD: continue

            print('Attempting to fix...')

            valid = feed.update_story(meta_json, is_manual=True)
            if valid:
                database.put_story(meta_json)
                search.put_story(meta_json)
                print('Success')
            else:
                print('Story was not valid')

            time.sleep(3)

        except KeyboardInterrupt:
            raise
        except BaseException as e:
            logging.exception(e)
            breakpoint()

if __name__ == '__main__':
    num_stories = database.count_stories()

    print('Fix {} stories?'.format(num_stories))
    print('Press ENTER to continue, ctrl-c to cancel')
    input()

    story_list = database.get_story_list()

    fix_gzip_bug(story_list)
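
For scale, `ratio` is per-mille: the `FIX_THRESHOLD` of 150 only re-fetches stories where at least 15% of the text decoded to the U+FFFD replacement character — e.g. 300 bad characters in a 2,000-character article gives a ratio of exactly 150.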

View File

@@ -0,0 +1,62 @@
import logging
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.INFO)

import database
from sqlalchemy import select
import search
import sys
import time
import json
import requests

database.init()
search.init()

BATCH_SIZE = 5000

def put_stories(stories):
    return search.meili_api(requests.post, 'indexes/qotnews/documents', stories)

def get_update(update_id):
    return search.meili_api(requests.get, 'tasks/{}'.format(update_id))

if __name__ == '__main__':
    num_stories = database.count_stories()
    print('Reindex {} stories?'.format(num_stories))
    print('Press ENTER to continue, ctrl-c to cancel')
    input()

    story_list = database.get_story_list()

    count = 1
    while len(story_list):
        stories = []

        for _ in range(BATCH_SIZE):
            try:
                sid = story_list.pop()
            except IndexError:
                break

            story = database.get_story(sid)
            print('Indexing {}/{} id: {} title: {}'.format(count, num_stories, sid[0], story.title))
            story_obj = json.loads(story.meta_json)
            stories.append(story_obj)
            count += 1

        res = put_stories(stories)
        update_id = res['uid']

        print('Waiting for processing', end='')
        while get_update(update_id)['status'] != 'succeeded':
            time.sleep(0.5)
            print('.', end='', flush=True)

        print()
    print('Done.')
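
Note the poll loop only exits on 'succeeded'; if Meilisearch marks the task 'failed' the script would spin forever, which is presumably acceptable for a manually supervised one-off.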

View File

@@ -0,0 +1,23 @@
import time
import requests

def test_search_api():
    num_tests = 100
    total_time = 0

    for i in range(num_tests):
        start = time.time()

        res = requests.get('http://127.0.0.1:33842/api/search?q=iphone')
        res.raise_for_status()

        duration = time.time() - start
        total_time += duration

    avg_time = total_time / num_tests

    print('Average search time:', avg_time)

if __name__ == '__main__':
    test_search_api()

View File

@@ -4,86 +4,62 @@ logging.basicConfig(
     level=logging.DEBUG)
 
 import requests
+import settings
 
-MEILI_URL = 'http://127.0.0.1:7700/'
+SEARCH_ENABLED = bool(settings.MEILI_URL)
+
+def meili_api(method, route, json=None, params=None, parse_json=True):
+    try:
+        r = method(settings.MEILI_URL + route, json=json, params=params, timeout=4)
+        if r.status_code > 299:
+            raise Exception('Bad response code ' + str(r.status_code))
+        if parse_json:
+            return r.json()
+        else:
+            r.encoding = 'utf-8'
+            return r.text
+    except KeyboardInterrupt:
+        raise
+    except BaseException as e:
+        logging.error('Problem with MeiliSearch api route: %s: %s', route, str(e))
+        return False
 
 def create_index():
-    try:
-        json = dict(name='qotnews', uid='qotnews')
-        r = requests.post(MEILI_URL + 'indexes', json=json, timeout=2)
-        if r.status_code != 201:
-            raise Exception('Bad response code ' + str(r.status_code))
-        return r.json()
-    except KeyboardInterrupt:
-        raise
-    except BaseException as e:
-        logging.error('Problem creating MeiliSearch index: {}'.format(str(e)))
-        return False
+    json = dict(uid='qotnews', primaryKey='id')
+    return meili_api(requests.post, 'indexes', json=json)
 
 def update_rankings():
-    try:
-        json = ['typo', 'words', 'proximity', 'attribute', 'desc(date)', 'wordsPosition', 'exactness']
-        r = requests.post(MEILI_URL + 'indexes/qotnews/settings/ranking-rules', json=json, timeout=2)
-        if r.status_code != 202:
-            raise Exception('Bad response code ' + str(r.status_code))
-        return r.json()
-    except KeyboardInterrupt:
-        raise
-    except BaseException as e:
-        logging.error('Problem setting MeiliSearch ranking rules: {}'.format(str(e)))
-        return False
+    json = ['typo', 'words', 'proximity', 'date:desc', 'exactness']
+    return meili_api(requests.post, 'indexes/qotnews/settings/ranking-rules', json=json)
 
 def update_attributes():
-    try:
-        json = ['title', 'url', 'author', 'link', 'id']
-        r = requests.post(MEILI_URL + 'indexes/qotnews/settings/searchable-attributes', json=json, timeout=2)
-        if r.status_code != 202:
-            raise Exception('Bad response code ' + str(r.status_code))
-        return r.json()
-        r = requests.delete(MEILI_URL + 'indexes/qotnews/settings/displayed-attributes', timeout=2)
-        if r.status_code != 202:
-            raise Exception('Bad response code ' + str(r.status_code))
-        return r.json()
-    except KeyboardInterrupt:
-        raise
-    except BaseException as e:
-        logging.error('Problem setting MeiliSearch searchable attributes: {}'.format(str(e)))
-        return False
+    json = ['title', 'url', 'author']
+    r = meili_api(requests.post, 'indexes/qotnews/settings/searchable-attributes', json=json)
+    json = ['id', 'ref', 'source', 'author', 'author_link', 'score', 'date', 'title', 'link', 'url', 'num_comments']
+    r = meili_api(requests.post, 'indexes/qotnews/settings/displayed-attributes', json=json)
+    return r
 
 def init():
-    create_index()
+    if not SEARCH_ENABLED:
+        logging.info('Search is not enabled, skipping init.')
+        return
+
+    print(create_index())
     update_rankings()
     update_attributes()
 
 def put_story(story):
-    story = story.copy()
-    story.pop('text', None)
-    story.pop('comments', None)
-    try:
-        r = requests.post(MEILI_URL + 'indexes/qotnews/documents', json=[story], timeout=2)
-        if r.status_code != 202:
-            raise Exception('Bad response code ' + str(r.status_code))
-        return r.json()
-    except KeyboardInterrupt:
-        raise
-    except BaseException as e:
-        logging.error('Problem putting MeiliSearch story: {}'.format(str(e)))
-        return False
+    if not SEARCH_ENABLED: return
+    return meili_api(requests.post, 'indexes/qotnews/documents', [story])
 
 def search(q):
-    try:
-        params = dict(q=q, limit=250)
-        r = requests.get(MEILI_URL + 'indexes/qotnews/search', params=params, timeout=2)
-        if r.status_code != 200:
-            raise Exception('Bad response code ' + str(r.status_code))
-        return r.json()['hits']
-    except KeyboardInterrupt:
-        raise
-    except BaseException as e:
-        logging.error('Problem searching MeiliSearch: {}'.format(str(e)))
-        return False
+    if not SEARCH_ENABLED: return []
+    params = dict(q=q, limit=settings.FEED_LENGTH)
+    r = meili_api(requests.get, 'indexes/qotnews/search', params=params, parse_json=False)
+    return r
 
 if __name__ == '__main__':
-    create_index()
-    print(search('the'))
-    print(search('facebook'))
+    init()
+    print(update_rankings())
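
`meili_api` centralizes URL joining, the status check, and error handling for every Meilisearch call. A sketch of calling it directly (route strings follow the Meilisearch HTTP API; `settings.MEILI_URL` must be configured):

```
import requests
health = meili_api(requests.get, 'health')   # parsed JSON on success, False on error
raw = meili_api(requests.get, 'indexes/qotnews/search',
                params=dict(q='rust', limit=10), parse_json=False)
```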

View File

@@ -15,6 +15,7 @@ import traceback
 import time
 from urllib.parse import urlparse, parse_qs
 
+import settings
 import database
 import search
 import feed
@@ -27,7 +28,6 @@ from flask_cors import CORS
 database.init()
 search.init()
 
-FEED_LENGTH = 75
 news_index = 0
 
 def new_id():
@@ -42,7 +42,9 @@ cors = CORS(flask_app)
 
 @flask_app.route('/api')
 def api():
-    stories = database.get_stories(FEED_LENGTH)
+    skip = request.args.get('skip', 0)
+    limit = request.args.get('limit', settings.FEED_LENGTH)
+    stories = database.get_stories(limit, skip)
 
     # hacky nested json
     res = Response('{"stories":[' + ','.join(stories) + ']}')
     res.headers['content-type'] = 'application/json'
@@ -54,8 +56,10 @@ def apisearch():
     if len(q) >= 3:
         results = search.search(q)
     else:
-        results = []
-    return dict(results=results)
+        results = '[]'
+    res = Response(results)
+    res.headers['content-type'] = 'application/json'
+    return res
 
 @flask_app.route('/api/submit', methods=['POST'], strict_slashes=False)
 def submit():
@@ -63,6 +67,8 @@ def submit():
         url = request.form['url']
         nid = new_id()
 
+        logging.info('Manual submission: ' + url)
+
         parse = urlparse(url)
         if 'news.ycombinator.com' in parse.hostname:
             source = 'hackernews'
@@ -70,6 +76,9 @@ def submit():
         elif 'tildes.net' in parse.hostname and '~' in url:
             source = 'tildes'
             ref = parse.path.split('/')[2]
+        elif 'lobste.rs' in parse.hostname and '/s/' in url:
+            source = 'lobsters'
+            ref = parse.path.split('/')[2]
         elif 'reddit.com' in parse.hostname and 'comments' in url:
             source = 'reddit'
             ref = parse.path.split('/')[4]
@@ -113,9 +122,11 @@ def story(sid):
 @flask_app.route('/search')
 def index():
     return render_template('index.html',
-        title='Feed',
+        title='QotNews',
         url='news.t0.vc',
-        description='Reddit, Hacker News, and Tildes combined, then pre-rendered in reader mode')
+        description='Hacker News, Reddit, Lobsters, and Tildes articles rendered in reader mode',
+        robots='index',
+    )
 
 @flask_app.route('/<sid>', strict_slashes=False)
 @flask_app.route('/<sid>/c', strict_slashes=False)
@@ -140,9 +151,11 @@ def static_story(sid):
     url = url.replace('www.', '')
 
     return render_template('index.html',
-        title=story['title'],
+        title=story['title'] + ' | QotNews',
         url=url,
-        description=description)
+        description=description,
+        robots='noindex',
+    )
 
 http_server = WSGIServer(('', 33842), flask_app)
 
@@ -158,12 +171,13 @@ def feed_thread():
                 continue
             try:
                 nid = new_id()
+                logging.info('Adding ref: {}, id: {}, source: {}'.format(ref, nid, source))
                 database.put_ref(ref, nid, source)
-                logging.info('Added ref ' + ref)
             except database.IntegrityError:
+                logging.info('Already have ID / ref, skipping.')
                 continue
 
-        ref_list = database.get_reflist(FEED_LENGTH)
+        ref_list = database.get_reflist(settings.FEED_LENGTH)
 
         # update current stories
         if news_index < len(ref_list):
@@ -175,7 +189,7 @@ def feed_thread():
             except AttributeError:
                 story = dict(id=item['sid'], ref=item['ref'], source=item['source'])
 
-            logging.info('Updating story: ' + str(story['ref']) + ', index: ' + str(news_index))
+            logging.info('Updating {} story: {}, index: {}'.format(story['source'], story['ref'], news_index))
             valid = feed.update_story(story)
 
             if valid:
@@ -190,18 +204,18 @@ def feed_thread():
             gevent.sleep(6)
 
         news_index += 1
-        if news_index == FEED_LENGTH: news_index = 0
+        if news_index == settings.FEED_LENGTH: news_index = 0
 
     except KeyboardInterrupt:
         logging.info('Ending feed thread...')
     except ValueError as e:
-        logging.error('feed_thread error: {} {}'.format(e.__class__.__name__, e))
+        logging.critical('feed_thread error: {} {}'.format(e.__class__.__name__, e))
         http_server.stop()
 
-print('Starting Feed thread...')
+logging.info('Starting Feed thread...')
gevent.spawn(feed_thread)
 
-print('Starting HTTP thread...')
+logging.info('Starting HTTP thread...')
 try:
     http_server.serve_forever()
 except KeyboardInterrupt:
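
With `skip` and `limit` exposed, the feed endpoint is pageable from the client side; a sketch against a local instance (port from the `WSGIServer` line above):

```
import requests
r = requests.get('http://127.0.0.1:33842/api', params=dict(skip=20, limit=20))
print(len(r.json()['stories']))
```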

View File

@@ -4,10 +4,21 @@
 # Feed Lengths
 # Number of top items from each site to pull
 # set to 0 to disable that site
+FEED_LENGTH = 75
 NUM_HACKERNEWS = 15
-NUM_REDDIT = 10
+NUM_LOBSTERS = 10
+NUM_REDDIT = 15
 NUM_TILDES = 5
 
+# Meilisearch server URL
+# Leave blank if not using search
+#MEILI_URL = 'http://127.0.0.1:7700/'
+MEILI_URL = ''
+
+# Readerserver URL
+# Leave blank if not using, but that defeats the whole point
+READER_URL = 'http://127.0.0.1:33843/'
+
 # Reddit account info
 # leave blank if not using Reddit
 REDDIT_CLIENT_ID = ''
@@ -22,13 +33,9 @@ SUBREDDITS = [
     'HistoryofIdeas',
     'LaymanJournals',
     'PhilosophyofScience',
-    'PoliticsPDFs',
-    'Scholar',
     'StateOfTheUnion',
     'TheAgora',
-    'TrueFilm',
     'TrueReddit',
-    'UniversityofReddit',
     'culturalstudies',
     'hardscience',
     'indepthsports',
@@ -37,4 +44,7 @@ SUBREDDITS = [
     'neurophilosophy',
     'resilientcommunities',
     'worldevents',
+    'StallmanWasRight',
+    'EverythingScience',
+    'longevity',
 ]

View File

@@ -8,6 +8,14 @@ import string
 
 from bleach.sanitizer import Cleaner
 
+def alert_tanner(message):
+    try:
+        logging.info('Alerting Tanner: ' + message)
+        params = dict(qotnews=message)
+        requests.get('https://tbot.tannercollin.com/message', params=params, timeout=4)
+    except BaseException as e:
+        logging.error('Problem alerting Tanner: ' + str(e))
+
 def gen_rand_id():
     return ''.join(random.choice(string.ascii_uppercase) for _ in range(4))

View File

@@ -35,6 +35,7 @@ app.post('/', (req, res) => {
   const url = req.body.url;
   const requestOptions = {
     url: url,
+    gzip: true,
     //headers: {'User-Agent': 'Googlebot/2.1 (+http://www.google.com/bot.html)'},
     //headers: {'User-Agent': 'Twitterbot/1.0'},
     headers: {
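
The request library's `gzip: true` option both advertises `Accept-Encoding: gzip` and transparently decompresses the response — presumably the root cause of the mangled text that the gzip-fix script above hunts for was gzipped bodies being read as plain text.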

File diff suppressed because it is too large

View File

@@ -3,7 +3,7 @@
 Download MeiliSearch with:
 
 ```
-wget https://github.com/meilisearch/MeiliSearch/releases/download/v0.11.1/meilisearch-linux-amd64
+wget https://github.com/meilisearch/meilisearch/releases/download/v0.27.0/meilisearch-linux-amd64
 chmod +x meilisearch-linux-amd64
 ```

View File

@@ -8,6 +8,8 @@
       content="{{ description }}"
     />
     <meta content="{{ url }}" name="og:site_name">
+    <meta name="robots" content="{{ robots }}">
+
     <link rel="apple-touch-icon" sizes="180x180" href="/apple-touch-icon.png">
     <link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png">
@@ -26,7 +28,7 @@
       work correctly both with client-side routing and a non-root public URL.
       Learn how to configure a non-root public URL by running `npm run build`.
     -->
-    <title>{{ title }} - QotNews</title>
+    <title>{{ title }}</title>
 
     <style>
       html {
@@ -37,13 +39,23 @@
       }
       .nojs {
         color: white;
+        max-width: 32rem;
       }
     </style>
   </head>
   <body>
     <div class="nojs">
-      <noscript>You need to enable JavaScript to run this app.</noscript>
+      <noscript>
+        You need to enable JavaScript to run this app because it's written in React.
+        I was planning on writing a server-side version, but I've become distracted
+        by other projects -- sorry!
+        <br/>
+        I originally wrote this for myself, and of course I whitelist JavaScript on
+        all my own domains.
+        <br/><br/>
+        Alternatively, try activex.news.t0.vc for an ActiveX™ version.
+      </noscript>
     </div>
     <div id="root"></div>
     <!--

View File

@@ -3,8 +3,10 @@ import { BrowserRouter as Router, Route, Link, Switch } from 'react-router-dom';
 import localForage from 'localforage';
 import './Style-light.css';
 import './Style-dark.css';
+import './Style-black.css';
+import './Style-red.css';
 import './fonts/Fonts.css';
-import { ForwardDot } from './utils.js';
+import { BackwardDot, ForwardDot } from './utils.js';
 import Feed from './Feed.js';
 import Article from './Article.js';
 import Comments from './Comments.js';
@@ -38,6 +40,16 @@ class App extends React.Component {
     localStorage.setItem('theme', 'dark');
   }
 
+  black() {
+    this.setState({ theme: 'black' });
+    localStorage.setItem('theme', 'black');
+  }
+
+  red() {
+    this.setState({ theme: 'red' });
+    localStorage.setItem('theme', 'red');
+  }
+
   componentDidMount() {
     if (!this.cache.length) {
       localForage.iterate((value, key) => {
@@ -47,22 +59,61 @@ class App extends React.Component {
     }
   }
 
+  goFullScreen() {
+    if ('wakeLock' in navigator) {
+      navigator.wakeLock.request('screen');
+    }
+
+    document.body.requestFullscreen({ navigationUI: 'hide' }).then(() => {
+      window.addEventListener('resize', () => this.forceUpdate());
+      this.forceUpdate();
+    });
+  };
+
+  exitFullScreen() {
+    document.exitFullscreen().then(() => {
+      this.forceUpdate();
+    });
+  };
+
   render() {
     const theme = this.state.theme;
-    document.body.style.backgroundColor = theme === 'dark' ? '#000' : '#eeeeee';
+
+    if (theme === 'dark') {
+      document.body.style.backgroundColor = '#1a1a1a';
+    } else if (theme === 'black') {
+      document.body.style.backgroundColor = '#000';
+    } else if (theme === 'red') {
+      document.body.style.backgroundColor = '#000';
+    } else {
+      document.body.style.backgroundColor = '#eeeeee';
+    }
+
+    const fullScreenAvailable = document.fullscreenEnabled ||
+      document.mozFullscreenEnabled ||
+      document.webkitFullscreenEnabled ||
+      document.msFullscreenEnabled;
 
     return (
       <div className={theme}>
         <Router>
           <div className='container menu'>
             <p>
-              <Link to='/'>QotNews - Feed</Link>
-              <span className='theme'>Theme: <a href='#' onClick={() => this.light()}>Light</a> - <a href='#' onClick={() => this.dark()}>Dark</a></span>
+              <Link to='/'>QotNews</Link>
+              <span className='theme'><a href='#' onClick={() => this.light()}>Light</a> - <a href='#' onClick={() => this.dark()}>Dark</a> - <a href='#' onClick={() => this.black()}>Black</a> - <a href='#' onClick={() => this.red()}>Red</a></span>
               <br />
-              <span className='slogan'>Reddit, Hacker News, and Tildes combined, then pre-rendered in reader mode.</span>
+              <span className='slogan'>Hacker News, Reddit, Lobsters, and Tildes articles rendered in reader mode.</span>
             </p>
             <Route path='/(|search)' component={Search} />
             <Route path='/(|search)' component={Submit} />
+            {fullScreenAvailable &&
+              <Route path='/(|search)' render={() => !document.fullscreenElement ?
+                  <button className='fullscreen' onClick={() => this.goFullScreen()}>Enter Fullscreen</button>
+                :
+                  <button className='fullscreen' onClick={() => this.exitFullScreen()}>Exit Fullscreen</button>
              } />
+            }
           </div>
 
           <Route path='/' exact render={(props) => <Feed {...props} updateCache={this.updateCache} />} />
@@ -72,6 +123,7 @@ class App extends React.Component {
           </Switch>
         <Route path='/:id/c' exact render={(props) => <Comments {...props} cache={this.cache} />} />
 
+        <BackwardDot />
         <ForwardDot />
 
         <ScrollToTop />
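
Worth noting: `navigator.wakeLock.request('screen')` and `requestFullscreen` are both promise-based browser APIs, and the wake-lock call is only available in secure (HTTPS) contexts — the `'wakeLock' in navigator` guard keeps older browsers from throwing.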

View File

@@ -67,7 +67,8 @@ class Article extends React.Component {
         {story ?
           <div className='article'>
             <Helmet>
-              <title>{story.title} - QotNews</title>
+              <title>{story.title} | QotNews</title>
+              <meta name="robots" content="noindex" />
             </Helmet>
 
             <h1>{story.title}</h1>

View File

@@ -115,7 +115,8 @@ class Article extends React.Component {
         {story ?
           <div className='article'>
             <Helmet>
-              <title>{story.title} - QotNews Comments</title>
+              <title>{story.title} | QotNews</title>
+              <meta name="robots" content="noindex" />
             </Helmet>
 
             <h1>{story.title}</h1>

View File

@@ -52,7 +52,8 @@ class Feed extends React.Component {
     return (
       <div className='container'>
         <Helmet>
-          <title>Feed - QotNews</title>
+          <title>QotNews</title>
+          <meta name="robots" content="index" />
         </Helmet>
 
         {error && <p>Connection error?</p>}
         {stories ?

View File

@@ -29,7 +29,7 @@ class Results extends React.Component {
       .then(res => res.json())
       .then(
         (result) => {
-          this.setState({ stories: result.results });
+          this.setState({ stories: result.hits });
         },
         (error) => {
           if (error.message !== 'The operation was aborted. ') {
@@ -56,7 +56,7 @@ class Results extends React.Component {
     return (
       <div className='container'>
         <Helmet>
-          <title>Feed - QotNews</title>
+          <title>Search Results | QotNews</title>
         </Helmet>
 
         {error && <p>Connection error?</p>}
         {stories ?

View File

@@ -15,6 +15,7 @@ class ScrollToTop extends React.Component {
     }
 
     window.scrollTo(0, 0);
+    document.body.scrollTop = 0;
   }
 
   render() {

View File

@@ -37,7 +37,7 @@ class Search extends Component {
       <span className='search'>
         <form onSubmit={this.searchAgain}>
           <input
-            placeholder='Search... (fixed)'
+            placeholder='Search...'
             value={search}
             onChange={this.searchArticles}
             ref={this.inputRef}

View File

@@ -0,0 +1,68 @@
.black {
    color: #ddd;
}

.black a {
    color: #ddd;
}

.black input {
    color: #ddd;
    border: 1px solid #828282;
}

.black button {
    background-color: #444444;
    border-color: #bbb;
    color: #ddd;
}

.black .item {
    color: #828282;
}

.black .item .source-logo {
    filter: grayscale(1);
}

.black .item a {
    color: #828282;
}

.black .item a.link {
    color: #ddd;
}

.black .item a.link:visited {
    color: #828282;
}

.black .item .info a.hot {
    color: #cccccc;
}

.black .article a {
    border-bottom: 1px solid #aaaaaa;
}

.black .article u {
    border-bottom: 1px solid #aaaaaa;
    text-decoration: none;
}

.black .story-text video,
.black .story-text img {
    filter: brightness(50%);
}

.black .article .info {
    color: #828282;
}

.black .article .info a {
    border-bottom: none;
    color: #828282;
}

.black .comment.lined {
    border-left: 1px solid #444444;
}

View File

@@ -11,12 +11,14 @@
     border: 1px solid #828282;
 }
 
-.dark .item {
-    color: #828282;
+.dark button {
+    background-color: #444444;
+    border-color: #bbb;
+    color: #ddd;
 }
 
-.dark .item .source-logo {
-    filter: grayscale(1);
+.dark .item {
+    color: #828282;
 }
 
 .dark .item a {
@@ -43,6 +45,7 @@
     text-decoration: none;
 }
 
+.dark .story-text video,
 .dark .story-text img {
     filter: brightness(50%);
 }

View File

@@ -2,9 +2,30 @@ body {
     text-rendering: optimizeLegibility;
     font: 1rem/1.3 sans-serif;
     color: #000000;
-    margin-bottom: 100vh;
     word-break: break-word;
     font-kerning: normal;
+    margin: 0;
+}
+
+::backdrop {
+    background-color: rgba(0,0,0,0);
+}
+
+body:fullscreen {
+    overflow-y: scroll !important;
+}
+body:-ms-fullscreen {
+    overflow-y: scroll !important;
+}
+body:-webkit-full-screen {
+    overflow-y: scroll !important;
+}
+body:-moz-full-screen {
+    overflow-y: scroll !important;
+}
+
+#root {
+    margin: 8px 8px 100vh 8px !important;
 }
 
 a {
@@ -22,6 +43,12 @@ input {
     border-radius: 4px;
 }
 
+.fullscreen {
+    margin: 0.25rem;
+    padding: 0.25rem;
+}
+
 pre {
     overflow: auto;
 }
@@ -185,16 +212,20 @@ span.source {
     cursor: pointer;
 }
 
-.toggleDot {
+.dot {
+    cursor: pointer;
     position: fixed;
-    bottom: 1rem;
-    left: 1rem;
     height: 3rem;
     width: 3rem;
     background-color: #828282;
    border-radius: 50%;
 }
 
+.toggleDot {
+    bottom: 1rem;
+    left: 1rem;
+}
+
 .toggleDot .button {
     font: 2rem/1 'icomoon';
     position: relative;
@@ -203,21 +234,27 @@ span.source {
 }
 
 .forwardDot {
-    cursor: pointer;
-    position: fixed;
     bottom: 1rem;
     right: 1rem;
-    height: 3rem;
-    width: 3rem;
-    background-color: #828282;
-    border-radius: 50%;
 }
 
 .forwardDot .button {
-    font: 2.5rem/1 'icomoon';
+    font: 2rem/1 'icomoon';
     position: relative;
-    top: 0.25rem;
-    left: 0.3rem;
+    top: 0.5rem;
+    left: 0.5rem;
+}
+
+.backwardDot {
+    bottom: 1rem;
+    right: 5rem;
+}
+
+.backwardDot .button {
+    font: 2rem/1 'icomoon';
+    position: relative;
+    top: 0.5rem;
+    left: 0.5rem;
 }
 
 .search form {

View File

@@ -0,0 +1,82 @@
.red {
    color: #b00;
    scrollbar-color: #b00 #440000;
}

.red a {
    color: #b00;
}

.red input {
    color: #b00;
    border: 1px solid #690000;
}

.red input::placeholder {
    color: #690000;
}

.red hr {
    background-color: #690000;
}

.red button {
    background-color: #440000;
    border-color: #b00;
    color: #b00;
}

.red .item,
.red .slogan {
    color: #690000;
}

.red .item .source-logo {
    display: none;
}

.red .item a {
    color: #690000;
}

.red .item a.link {
    color: #b00;
}

.red .item a.link:visited {
    color: #690000;
}

.red .item .info a.hot {
    color: #cc0000;
}

.red .article a {
    border-bottom: 1px solid #aa0000;
}

.red .article u {
    border-bottom: 1px solid #aa0000;
    text-decoration: none;
}

.red .story-text video,
.red .story-text img {
    filter: grayscale(100%) brightness(20%) sepia(100%) hue-rotate(-50deg) saturate(600%) contrast(0.8);
}

.red .article .info {
    color: #690000;
}

.red .article .info a {
    border-bottom: none;
    color: #690000;
}

.red .comment.lined {
    border-left: 1px solid #440000;
}

.red .dot {
    background-color: #440000;
}

View File

@@ -41,7 +41,7 @@ class Submit extends Component {
       <span className='search'>
         <form onSubmit={this.submitArticle}>
           <input
-            placeholder='Submit Article'
+            placeholder='Submit URL'
             ref={this.inputRef}
           />
         </form>

Binary file not shown.

File diff suppressed because one or more lines are too long