Compare commits


85 Commits

Author SHA1 Message Date
Jason Schwarzenberger
2f730c1f52 update declutter. 2020-11-18 15:20:23 +13:00
Jason Schwarzenberger
e0960d59f3 update readme. 2020-11-18 13:26:34 +13:00
Jason Schwarzenberger
f5b38f5c6b remove readerserver, add declutter. 2020-11-18 12:59:35 +13:00
Jason Schwarzenberger
c9da2a078b increase setTimeouts. 2020-11-18 10:06:45 +13:00
Jason Schwarzenberger
78654e0c63 reduce setTimeout. 2020-11-17 16:07:33 +13:00
Jason Schwarzenberger
3b885e4327 renaming things. 2020-11-17 15:54:14 +13:00
Jason Schwarzenberger
55d50a86d8 hmmm 2020-11-17 15:13:38 +13:00
Jason Schwarzenberger
55e7f6bb14 cosmetic filters for newshub. 2020-11-17 15:01:12 +13:00
Jason Schwarzenberger
5668fa5dbc fix mistake. 2020-11-17 12:54:54 +13:00
Jason Schwarzenberger
b771b52501 add regex to get a unique ref from each sitemap/category based article url. 2020-11-17 12:38:28 +13:00
Jason Schwarzenberger
f5c7a658ba cosmetic filters for the spinoff. 2020-11-16 16:49:39 +13:00
Jason Schwarzenberger
f5ccd844da fix import error. 2020-11-16 15:41:09 +13:00
Jason Schwarzenberger
6a91b9402f split categories, sitemap and other crap out of news.py 2020-11-16 15:30:33 +13:00
Jason Schwarzenberger
b80c1a5cb5 extract story list item from Results and Feed. 2020-11-16 13:17:58 +13:00
Jason Schwarzenberger
b23e470317 move reddit thresholds as settings variables. 2020-11-16 10:11:39 +13:00
Jason Schwarzenberger
7420b5ece9 fix microdata multiple authors 2020-11-12 17:33:46 +13:00
Jason Schwarzenberger
64ced635cc fix mistake. 2020-11-12 17:15:29 +13:00
Jason Schwarzenberger
9318627f1b ability to pass in multiple site maps/category urls. 2020-11-12 17:11:51 +13:00
Jason Schwarzenberger
3d0a3f1577 support list based json-ld authors. 2020-11-12 15:08:23 +13:00
Jason Schwarzenberger
587b10c438 recursive sitemaps (sitemap indexes) 2020-11-12 14:56:46 +13:00
Jason
00954c6cac local browser scraper 2020-11-11 09:26:54 +00:00
Jason Schwarzenberger
637bc38476 fix mistake. 2020-11-11 17:21:31 +13:00
Jason Schwarzenberger
164b7e72c4 basically add declutter like capabilities. 2020-11-11 17:16:04 +13:00
Jason Schwarzenberger
3169af3002 hostname from settings. 2020-11-11 09:46:27 +13:00
Jason Schwarzenberger
d588a60930 add source to searchable attributes. 2020-11-11 09:37:54 +13:00
Jason Schwarzenberger
408e2870b2 tzinfo and microdata schema urls. 2020-11-10 16:51:27 +13:00
Jason Schwarzenberger
44b8b36547 add data cast in query. 2020-11-10 15:50:18 +13:00
Jason Schwarzenberger
4f49684194 remove logos from utils.js 2020-11-10 15:38:48 +13:00
Jason Schwarzenberger
1d78b1c592 fix favicon url. 2020-11-10 15:34:21 +13:00
Jason Schwarzenberger
0374794536 Sitemap and Category to get favicon into icon property of story. 2020-11-10 15:22:27 +13:00
Jason Schwarzenberger
943a1cfa4f reader server 2020-11-10 14:56:21 +13:00
Jason Schwarzenberger
9cee370a25 tvnz icon 2020-11-10 14:10:02 +13:00
Jason Schwarzenberger
5efc6ef2d3 add related stories (in api only) 2020-11-10 14:09:56 +13:00
Jason Schwarzenberger
4ec50e20cb feed thread loop. 2020-11-10 10:10:38 +13:00
Jason Schwarzenberger
c1b7877f4b remove limit. 2020-11-09 17:54:50 +13:00
Jason Schwarzenberger
7b8cbfc9b9 try to make feed only determined by the max age. 2020-11-09 17:50:58 +13:00
Jason Schwarzenberger
bfa4108a8e Merge remote-tracking branch 'tanner/master' 2020-11-09 16:08:28 +13:00
Jason Schwarzenberger
0bd0d40a31 use json type in sqlite. 2020-11-09 15:45:10 +13:00
Jason Schwarzenberger
4e04595415 fix search. 2020-11-09 15:44:44 +13:00
Jason
006db2960c change to 3 days 2020-11-09 01:36:51 +00:00
Jason Schwarzenberger
1f063f0dac undo log level change 2020-11-06 11:20:34 +13:00
Jason Schwarzenberger
1658346aa9 fix news.py feed. 2020-11-06 10:37:43 +13:00
Jason Schwarzenberger
2dbc702b40 switch to python-dateutil for parser, reverse sort xml feeds. 2020-11-06 10:02:39 +13:00
Jason Schwarzenberger
1c4764e67d sort sitemap feed by lastmod time. 2020-11-06 09:30:15 +13:00
Jason
ee49d2021e newsroom 2020-11-05 20:28:55 +00:00
Jason
c391c50ab1 use localize 2020-11-05 04:15:31 +00:00
Jason Schwarzenberger
095f0d549a use replace. 2020-11-05 16:57:08 +13:00
Jason Schwarzenberger
c21c71667e fix date issue. 2020-11-05 16:41:15 +13:00
Jason Schwarzenberger
c3a2c91a11 update requirements.txt 2020-11-05 16:33:50 +13:00
Jason Schwarzenberger
0f39446a61 tz aware for use in settings. 2020-11-05 16:30:55 +13:00
Jason Schwarzenberger
351059aab1 fix excludes. 2020-11-05 15:59:13 +13:00
Jason Schwarzenberger
4488e2c292 add an excludes list of substrings for urls in the settings for sitemap/category. 2020-11-05 15:51:59 +13:00
Jason Schwarzenberger
afda5b635c disqus test. 2020-11-05 14:23:51 +13:00
Jason Schwarzenberger
0fc1a44d2b fix issue in substack. 2020-11-04 17:40:29 +13:00
Jason Schwarzenberger
9fff1b9e46 avoid duplicate articles listed on the category page 2020-11-04 17:14:42 +13:00
Jason Schwarzenberger
16b59f6c67 try stop bad pages. 2020-11-04 16:34:31 +13:00
Jason Schwarzenberger
939f4775a7 better settings example. 2020-11-04 15:52:34 +13:00
Jason Schwarzenberger
9bfc6fc6fa scraper settings, ordering and loop. 2020-11-04 15:47:12 +13:00
Jason Schwarzenberger
6ea9844d00 remove useless try blocks. 2020-11-04 15:37:19 +13:00
Jason Schwarzenberger
1318259d3d imply referrer is substack. 2020-11-04 15:21:07 +13:00
Jason Schwarzenberger
98a0c2257c increase declutter timeout. 2020-11-04 15:15:00 +13:00
Jason Schwarzenberger
e6976db25d fix tabs 2020-11-04 15:04:20 +13:00
Jason Schwarzenberger
9edc8b7cca move scraping for article content to files. 2020-11-04 15:00:58 +13:00
Jason Schwarzenberger
33e21e7f30 fix mistake. 2020-11-04 12:45:01 +13:00
Jason Schwarzenberger
892a99eca6 add + expander in place of collapser. 2020-11-04 12:43:15 +13:00
Jason Schwarzenberger
d718d05a04 fix dates for newsroom. 2020-11-04 11:53:16 +13:00
Jason Schwarzenberger
d1795eb1b8 add radionz and newsroom logos. 2020-11-04 11:30:56 +13:00
Jason Schwarzenberger
9f4ff4acf0 remove unnecessary sitemap.xml request. 2020-11-04 11:22:15 +13:00
Jason Schwarzenberger
db6aad84ec fix mistake. 2020-11-04 11:12:01 +13:00
Jason Schwarzenberger
29f8a8b8cc add news site categories feed. 2020-11-04 11:08:50 +13:00
Jason
abf8589e02 fix sitemap 2020-11-03 10:53:40 +00:00
Jason
b759f46582 use extruct for opengraph/json-ld/microdata of articles 2020-11-03 10:31:36 +00:00
Jason Schwarzenberger
736cdc8576 fix mistake. 2020-11-03 17:04:46 +13:00
Jason Schwarzenberger
244d416f6e settings config of sitemap/substack publications. 2020-11-03 17:01:29 +13:00
Jason Schwarzenberger
5f98a2e76a Merge remote-tracking branch 'tanner/master' into master
And adding relevant setings.py.example/etc.
2020-11-03 16:44:02 +13:00
Jason Schwarzenberger
0567cdfd9b move sort to render. 2020-11-03 16:30:22 +13:00
Jason Schwarzenberger
4f90671cec order feed by reverse chronological 2020-11-03 16:21:23 +13:00
Jason Schwarzenberger
e63a1456a5 add logos. 2020-11-03 16:07:07 +13:00
Jason Schwarzenberger
76f1d57702 sitemap based feed. 2020-11-03 16:00:03 +13:00
Jason Schwarzenberger
de80389ed0 add logos. 2020-11-03 12:48:19 +13:00
Jason Schwarzenberger
4e64cf682a add the bulletin. 2020-11-03 12:41:16 +13:00
Jason Schwarzenberger
c5fe5d25a0 add substack.py top sites, replacing webworm.py 2020-11-03 12:28:39 +13:00
Jason
283a2b1545 fix webworm comments 2020-11-02 22:06:43 +00:00
Jason Schwarzenberger
0d6a86ace2 fix webworm dates. 2020-11-03 10:31:14 +13:00
Jason Schwarzenberger
f23bf628e0 add webworm/substack as a feed. 2020-11-02 17:09:59 +13:00
68 changed files with 5026 additions and 8089 deletions

.gitignore

@@ -1 +0,0 @@
.aider*

.gitmodules (new file)

@@ -0,0 +1,3 @@
[submodule "readerserver"]
path = readerserver
url = https://github.com/master5o1/declutter.git


@@ -20,7 +20,7 @@ $ sudo apt install yarn
 Clone this repo:
 ```text
-$ git clone https://gogs.tannercollin.com/tanner/qotnews.git
+$ git clone --recurse-submodules https://git.1j.nz/jason/qotnews.git
 $ cd qotnews
 ```
@@ -37,14 +37,14 @@ $ source env/bin/activate
 Configure Praw for your Reddit account (optional):
-* Go to https://www.reddit.com/prefs/apps
-* Click "Create app"
-* Name: whatever
-* App type: script
-* Description: blank
-* About URL: blank
-* Redirect URL: your GitHub profile
-* Submit, copy the client ID and client secret into `settings.py` below
+- Go to https://www.reddit.com/prefs/apps
+- Click "Create app"
+- Name: whatever
+- App type: script
+- Description: blank
+- About URL: blank
+- Redirect URL: your GitHub profile
+- Submit, copy the client ID and client secret into `settings.py` below
 ```text
 (env) $ vim settings.py.example
@@ -109,7 +109,7 @@ stdout_logfile_maxbytes=1MB
 [program:qotnewsreader]
 user=qotnews
 directory=/home/qotnews/qotnews/readerserver
-command=node main.js
+command=node index.js
 autostart=true
 autorestart=true
 stderr_logfile=/var/log/qotnewsreader.log


@@ -109,5 +109,4 @@ settings.py
 data.db
 data.db.bak
 data/archive/*
-data/backup/*
 qotnews.sqlite


@@ -1,11 +1,11 @@
-import json
+from datetime import datetime, timedelta
 from sqlalchemy import create_engine, Column, String, ForeignKey, Integer
 from sqlalchemy.ext.declarative import declarative_base
 from sqlalchemy.orm import sessionmaker
 from sqlalchemy.exc import IntegrityError
+from sqlalchemy.types import JSON
-engine = create_engine('sqlite:///data/qotnews.sqlite', connect_args={'timeout': 360})
+engine = create_engine('sqlite:///data/qotnews.sqlite')
 Session = sessionmaker(bind=engine)
 Base = declarative_base()
@@ -15,8 +15,8 @@ class Story(Base):
     sid = Column(String(16), primary_key=True)
     ref = Column(String(16), unique=True)
-    meta_json = Column(String)
-    full_json = Column(String)
+    meta = Column(JSON)
+    data = Column(JSON)
     title = Column(String)
 class Reflist(Base):
@@ -24,6 +24,7 @@ class Reflist(Base):
     rid = Column(Integer, primary_key=True)
     ref = Column(String(16), unique=True)
+    urlref = Column(String)
     sid = Column(String, ForeignKey('stories.sid'), unique=True)
     source = Column(String(16))
@@ -36,19 +37,21 @@ def get_story(sid):
 def put_story(story):
     story = story.copy()
-    full_json = json.dumps(story)
-    story.pop('text', None)
-    story.pop('comments', None)
-    meta_json = json.dumps(story)
+    data = {}
+    data.update(story)
+    meta = {}
+    meta.update(story)
+    meta.pop('text', None)
+    meta.pop('comments', None)
     try:
         session = Session()
         s = Story(
             sid=story['id'],
             ref=story['ref'],
-            full_json=full_json,
-            meta_json=meta_json,
+            data=data,
+            meta=meta,
             title=story.get('title', None),
         )
         session.merge(s)
@@ -63,25 +66,32 @@ def get_story_by_ref(ref):
     session = Session()
     return session.query(Story).filter(Story.ref==ref).first()
-def get_reflist(amount):
-    session = Session()
-    q = session.query(Reflist).order_by(Reflist.rid.desc()).limit(amount)
-    return [dict(ref=x.ref, sid=x.sid, source=x.source) for x in q.all()]
+def get_stories_by_url(url):
+    session = Session()
+    return session.query(Story).\
+        filter(Story.title != None).\
+        filter(Story.meta['url'].as_string() == url).\
+        order_by(Story.meta['date'].desc())
-def get_stories(amount, skip=0):
-    session = Session()
-    q = session.query(Reflist, Story.meta_json).\
-        order_by(Reflist.rid.desc()).\
+def get_reflist():
+    session = Session()
+    q = session.query(Reflist).order_by(Reflist.rid.desc())
+    return [dict(ref=x.ref, sid=x.sid, source=x.source, urlref=x.urlref) for x in q.all()]
+def get_stories(maxage=60*60*24*2):
+    time = datetime.now().timestamp() - maxage
+    session = Session()
+    q = session.query(Reflist, Story.meta).\
         join(Story).\
         filter(Story.title != None).\
-        offset(skip).\
-        limit(amount)
+        filter(Story.meta['date'].as_integer() > time).\
+        order_by(Story.meta['date'].desc())
     return [x[1] for x in q]
-def put_ref(ref, sid, source):
+def put_ref(ref, sid, source, urlref):
     try:
         session = Session()
-        r = Reflist(ref=ref, sid=sid, source=source)
+        r = Reflist(ref=ref, sid=sid, source=source, urlref=urlref)
         session.add(r)
         session.commit()
     except:
@@ -101,22 +111,7 @@ def del_ref(ref):
     finally:
         session.close()
-def count_stories():
-    try:
-        session = Session()
-        return session.query(Story).count()
-    finally:
-        session.close()
-def get_story_list():
-    try:
-        session = Session()
-        return session.query(Story.sid).all()
-    finally:
-        session.close()
 if __name__ == '__main__':
     init()
-    #print(get_story_by_ref('hgi3sy'))
-    print(len(get_reflist(99999)))
+    print(get_story_by_ref('hgi3sy'))
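
With the schema above, story metadata becomes a native JSON column, so callers can filter on fields inside `meta` directly in SQLite. A minimal, hedged sketch of how the new helpers might be used (the ref, sid, source and URL values below are made up for illustration and are not part of the diff):

```python
# Illustrative only: exercising the new JSON-backed helpers from apiserver/database.py.
import database

database.init()

# put_ref() now also records which concrete article URL a sitemap/category ref came from.
database.put_ref(ref='abc123', sid='XYZ1', source='radionz',
                 urlref='https://www.rnz.co.nz/news/national/example-article')

# get_stories() filters on Story.meta['date'] via SQLite's JSON support,
# keeping only stories newer than maxage seconds (default two days).
recent = database.get_stories(maxage=60*60*24*3)

# get_stories_by_url() returns a query over stories whose meta['url'] matches exactly,
# which is how duplicate articles from category pages can be detected.
dupes = database.get_stories_by_url('https://www.rnz.co.nz/news/national/example-article')
```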


@@ -1,8 +1,6 @@
 import database
 import search
 import sys
-import settings
-import logging
 import json
 import requests
@@ -23,7 +21,7 @@ def database_del_story(sid):
 def search_del_story(sid):
     try:
-        r = requests.delete(settings.MEILI_URL + 'indexes/qotnews/documents/'+sid, timeout=2)
+        r = requests.delete(search.MEILI_URL + 'indexes/qotnews/documents/'+sid, timeout=2)
         if r.status_code != 202:
             raise Exception('Bad response code ' + str(r.status_code))
         return r.json()


@@ -6,84 +6,120 @@ logging.basicConfig(
 import requests
 import time
 from bs4 import BeautifulSoup
+import itertools
 import settings
-from feeds import hackernews, reddit, tildes, manual, lobsters
-import utils
+from feeds import hackernews, reddit, tildes, substack, manual
+from feeds.sitemap import Sitemap
+from feeds.category import Category
+from scrapers import outline, declutter, headless, simple
-INVALID_DOMAINS = ['youtube.com', 'bloomberg.com', 'wsj.com', 'sec.gov']
-TWO_DAYS = 60*60*24*2
+INVALID_DOMAINS = ['youtube.com', 'bloomberg.com', 'wsj.com']
+substacks = {}
+for key, value in settings.SUBSTACK.items():
+    substacks[key] = substack.Publication(value['url'])
+categories = {}
+for key, value in settings.CATEGORY.items():
+    categories[key] = Category(value)
+sitemaps = {}
+for key, value in settings.SITEMAP.items():
+    sitemaps[key] = Sitemap(value)
-def list():
-    feed = []
+def get_list():
+    feeds = {}
     if settings.NUM_HACKERNEWS:
-        feed += [(x, 'hackernews') for x in hackernews.feed()[:settings.NUM_HACKERNEWS]]
+        feeds['hackernews'] = [(x, 'hackernews', x) for x in hackernews.feed()[:settings.NUM_HACKERNEWS]]
-    if settings.NUM_LOBSTERS:
-        feed += [(x, 'lobsters') for x in lobsters.feed()[:settings.NUM_LOBSTERS]]
     if settings.NUM_REDDIT:
-        feed += [(x, 'reddit') for x in reddit.feed()[:settings.NUM_REDDIT]]
+        feeds['reddit'] = [(x, 'reddit', x) for x in reddit.feed()[:settings.NUM_REDDIT]]
     if settings.NUM_TILDES:
-        feed += [(x, 'tildes') for x in tildes.feed()[:settings.NUM_TILDES]]
+        feeds['tildes'] = [(x, 'tildes', x) for x in tildes.feed()[:settings.NUM_TILDES]]
+    if settings.NUM_SUBSTACK:
+        feeds['substack'] = [(x, 'substack', x) for x in substack.top.feed()[:settings.NUM_SUBSTACK]]
+    for key, publication in substacks.items():
+        count = settings.SUBSTACK[key]['count']
+        feeds[key] = [(x, key, x) for x in publication.feed()[:count]]
+    for key, sites in categories.items():
+        count = settings.CATEGORY[key].get('count') or 0
+        excludes = settings.CATEGORY[key].get('excludes')
+        tz = settings.CATEGORY[key].get('tz')
+        feeds[key] = [(x, key, u) for x, u in sites.feed(excludes)[:count]]
+    for key, sites in sitemaps.items():
+        count = settings.SITEMAP[key].get('count') or 0
+        excludes = settings.SITEMAP[key].get('excludes')
+        feeds[key] = [(x, key, u) for x, u in sites.feed(excludes)[:count]]
+    values = feeds.values()
+    feed = itertools.chain.from_iterable(itertools.zip_longest(*values, fillvalue=None))
+    feed = list(filter(None, feed))
     return feed
 def get_article(url):
-    if not settings.READER_URL:
-        logging.info('Readerserver not configured, aborting.')
-        return ''
+    scrapers = {
+        'headless': headless,
+        'simple': simple,
+        'outline': outline,
+        'declutter': declutter,
+    }
+    available = settings.SCRAPERS or ['headless', 'simple']
+    if 'simple' not in available:
+        available += ['simple']
-    if url.startswith('https://twitter.com'):
-        logging.info('Replacing twitter.com url with nitter.net')
-        url = url.replace('twitter.com', 'nitter.net')
-    try:
-        r = requests.post(settings.READER_URL, data=dict(url=url), timeout=20)
-        if r.status_code != 200:
-            raise Exception('Bad response code ' + str(r.status_code))
-        return r.text
-    except KeyboardInterrupt:
-        raise
-    except BaseException as e:
-        logging.error('Problem getting article: {}'.format(str(e)))
-        return ''
+    for scraper in available:
+        if scraper not in scrapers.keys():
+            continue
+        try:
+            html = scrapers[scraper].get_html(url)
+            if html:
+                return html
+        except KeyboardInterrupt:
+            raise
+        except:
+            pass
+    return ''
 def get_content_type(url):
-    try:
-        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0'}
-        return requests.get(url, headers=headers, timeout=5).headers['content-type']
-    except:
-        return ''
     try:
         headers = {
             'User-Agent': 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
             'X-Forwarded-For': '66.249.66.1',
         }
-        return requests.get(url, headers=headers, timeout=10).headers['content-type']
+        return requests.get(url, headers=headers, timeout=5).headers['content-type']
     except:
         pass
+    try:
+        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0'}
+        return requests.get(url, headers=headers, timeout=10).headers['content-type']
+    except:
+        return ''
-def update_story(story, is_manual=False):
+def update_story(story, is_manual=False, urlref=None):
     res = {}
-    try:
-        if story['source'] == 'hackernews':
-            res = hackernews.story(story['ref'])
-        elif story['source'] == 'lobsters':
-            res = lobsters.story(story['ref'])
-        elif story['source'] == 'reddit':
-            res = reddit.story(story['ref'])
-        elif story['source'] == 'tildes':
-            res = tildes.story(story['ref'])
-        elif story['source'] == 'manual':
-            res = manual.story(story['ref'])
-    except BaseException as e:
-        utils.alert_tanner('Problem updating {} story, ref {}: {}'.format(story['source'], story['ref'], str(e)))
-        logging.exception(e)
-        return False
+    if story['source'] == 'hackernews':
+        res = hackernews.story(story['ref'])
+    elif story['source'] == 'reddit':
+        res = reddit.story(story['ref'])
+    elif story['source'] == 'tildes':
+        res = tildes.story(story['ref'])
+    elif story['source'] == 'substack':
+        res = substack.top.story(story['ref'])
+    elif story['source'] in categories.keys():
+        res = categories[story['source']].story(story['ref'], urlref)
+    elif story['source'] in sitemaps.keys():
+        res = sitemaps[story['source']].story(story['ref'], urlref)
+    elif story['source'] in substacks.keys():
+        res = substacks[story['source']].story(story['ref'])
+    elif story['source'] == 'manual':
+        res = manual.story(story['ref'])
     if res:
         story.update(res) # join dicts
@@ -91,8 +127,8 @@ def update_story(story, is_manual=False):
         logging.info('Story not ready yet')
         return False
-    if story['date'] and not is_manual and story['date'] + TWO_DAYS < time.time():
-        logging.info('Story too old, removing. Date: {}'.format(story['date']))
+    if story['date'] and not is_manual and story['date'] + settings.MAX_STORY_AGE < time.time():
+        logging.info('Story too old, removing')
         return False
     if story.get('url', '') and not story.get('text', ''):
@@ -106,12 +142,6 @@ def update_story(story, is_manual=False):
         logging.info(story['url'])
         return False
-    if 'trump' in story['title'].lower() or 'musk' in story['title'].lower() or 'Removed by moderator' in story['title']:
-        logging.info('Trump / Musk / removed story, skipping')
-        logging.info(story['url'])
-        return False
     logging.info('Getting article ' + story['url'])
     story['text'] = get_article(story['url'])
     if not story['text']: return False
@@ -129,7 +159,7 @@ if __name__ == '__main__':
     #print(get_article('https://www.bloomberg.com/news/articles/2019-09-23/xi-s-communists-under-pressure-as-high-prices-hit-china-workers'))
-    a = get_content_type('https://tefkos.comminfo.rutgers.edu/Courses/e530/Readings/Beal%202008%20full%20text%20searching.pdf')
+    a = get_article('https://blog.joinmastodon.org/2019/10/mastodon-3.0/')
     print(a)
     print('done')
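
The rewritten feed.py above pulls every source and scraper from settings rather than hard-coding them. A minimal sketch of the settings keys it reads, inferred from the attribute accesses in this diff; the site names, URLs, counts and patterns below are illustrative assumptions, not values taken from the repository:

```python
# settings.py (sketch) -- keys assumed from the feed.py, reddit.py, manual.py,
# misc/news.py and scrapers/* diffs above; concrete values are examples only.
HOSTNAME = 'news.example.nz'        # used by feeds/manual.py for author_link
MAX_STORY_AGE = 3*24*60*60          # replaces the old TWO_DAYS constant in update_story()
READER_PORT = 3000                  # local readerserver port used by the headless/simple scrapers
SCRAPERS = ['declutter', 'outline', 'headless', 'simple']  # tried in this order by get_article()

NUM_HACKERNEWS = 15
NUM_REDDIT = 10
NUM_TILDES = 5
NUM_SUBSTACK = 10

REDDIT_SCORE_THRESHOLD = 25
REDDIT_COMMENT_THRESHOLD = 10

SUBSTACK = {
    'webworm': {'url': 'https://www.webworm.co', 'count': 10},
}
CATEGORY = {
    'radionz': {
        'url': 'https://www.rnz.co.nz/news/',   # a single URL or a list of URLs
        'count': 20,
        'excludes': ['rnz.co.nz/news/sport', 'rnz.co.nz/weather'],
        'tz': 'Pacific/Auckland',
    },
}
SITEMAP = {
    'newsroom': {
        'url': 'https://www.newsroom.co.nz/sitemap.xml',
        'count': 20,
        'tz': 'Pacific/Auckland',
        # 'patterns' is read by misc/news.py Base.get_id() to derive a stable ref per article URL
        'patterns': [r'https://www\.newsroom\.co\.nz/(.*)'],
    },
}
```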


@@ -0,0 +1,72 @@
import logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
if __name__ == '__main__':
import sys
sys.path.insert(0,'.')
from bs4 import BeautifulSoup
import settings
from utils import clean
from misc.api import xml
from misc.news import Base
def _filter_links(links, category_url, excludes=None):
links = list(filter(None, [link if link.startswith(category_url) else None for link in links]))
links = list(filter(None, [link if link != category_url else None for link in links]))
links = list(set(links))
if excludes:
links = list(filter(None, [None if any(e in link for e in excludes) else link for link in links]))
return links
def _get_category(category_url, excludes=None):
base_url = '/'.join(category_url.split('/')[:3])
markup = xml(lambda x: category_url)
if not markup: return []
soup = BeautifulSoup(markup, features='html.parser')
links = soup.find_all('a', href=True)
links = [link.get('href') for link in links]
links = [f"{base_url}{link}" if link.startswith('/') else link for link in links]
links = _filter_links(links, category_url, excludes)
return links
class Category(Base):
def __init__(self, config):
self.config = config
self.category_url = config.get('url')
self.tz = config.get('tz')
def feed(self, excludes=None):
links = []
if isinstance(self.category_url, str):
links += _get_category(self.category_url, excludes)
elif isinstance(self.category_url, list):
for url in self.category_url:
links += _get_category(url, excludes)
links = list(set(links))
return [(self.get_id(link), link) for link in links]
# scratchpad so I can quickly develop the parser
if __name__ == '__main__':
print("Category: RadioNZ")
site = Category("https://www.rnz.co.nz/news/")
excludes = [
'rnz.co.nz/news/sport',
'rnz.co.nz/weather',
'rnz.co.nz/news/weather',
]
posts = site.feed(excludes)
print(posts[:5])
print(site.story(posts[0]))
print("Category: Newsroom")
site = Category("https://www.newsroom.co.nz/news/", tz='Pacific/Auckland')
posts = site.feed()
print(posts[:5])
print(site.story(posts[0]))


@@ -12,8 +12,7 @@ import requests
 from utils import clean
 API_TOPSTORIES = lambda x: 'https://hacker-news.firebaseio.com/v0/topstories.json'
-ALG_API_ITEM = lambda x : 'https://hn.algolia.com/api/v1/items/{}'.format(x)
-BHN_API_ITEM = lambda x : 'https://api.hnpwa.com/v0/item/{}.json'.format(x)
+API_ITEM = lambda x : 'https://hn.algolia.com/api/v1/items/{}'.format(x)
 SITE_LINK = lambda x : 'https://news.ycombinator.com/item?id={}'.format(x)
 SITE_AUTHOR_LINK = lambda x : 'https://news.ycombinator.com/user?id={}'.format(x)
@@ -43,7 +42,7 @@ def api(route, ref=None):
 def feed():
     return [str(x) for x in api(API_TOPSTORIES) or []]
-def alg_comment(i):
+def comment(i):
     if 'author' not in i:
         return False
@@ -52,25 +51,21 @@ def alg_comment(i):
     c['score'] = i.get('points', 0)
     c['date'] = i.get('created_at_i', 0)
     c['text'] = clean(i.get('text', '') or '')
-    c['comments'] = [alg_comment(j) for j in i['children']]
+    c['comments'] = [comment(j) for j in i['children']]
     c['comments'] = list(filter(bool, c['comments']))
     return c
-def alg_comment_count(i):
+def comment_count(i):
     alive = 1 if i['author'] else 0
-    return sum([alg_comment_count(c) for c in i['comments']]) + alive
+    return sum([comment_count(c) for c in i['comments']]) + alive
-def alg_story(ref):
-    r = api(ALG_API_ITEM, ref)
-    if not r:
-        logging.info('Bad Algolia Hackernews API response.')
-        return None
+def story(ref):
+    r = api(API_ITEM, ref)
+    if not r: return False
     if 'deleted' in r:
-        logging.info('Story was deleted.')
         return False
     elif r.get('type', '') != 'story':
-        logging.info('Type "{}" is not "story".'.format(r.get('type', '')))
         return False
     s = {}
@@ -81,88 +76,17 @@ def alg_story(ref):
     s['title'] = r.get('title', '')
     s['link'] = SITE_LINK(ref)
     s['url'] = r.get('url', '')
-    s['comments'] = [alg_comment(i) for i in r['children']]
+    s['comments'] = [comment(i) for i in r['children']]
     s['comments'] = list(filter(bool, s['comments']))
-    s['num_comments'] = alg_comment_count(s) - 1
+    s['num_comments'] = comment_count(s) - 1
     if 'text' in r and r['text']:
         s['text'] = clean(r['text'] or '')
     return s
-def bhn_comment(i):
-    if 'user' not in i:
-        return False
-    c = {}
-    c['author'] = i.get('user', '')
-    c['score'] = 0 # Not present?
-    c['date'] = i.get('time', 0)
-    c['text'] = clean(i.get('content', '') or '')
-    c['comments'] = [bhn_comment(j) for j in i['comments']]
-    c['comments'] = list(filter(bool, c['comments']))
-    return c
-def bhn_story(ref):
-    r = api(BHN_API_ITEM, ref)
-    if not r:
-        logging.info('Bad BetterHN Hackernews API response.')
-        return None
-    if 'deleted' in r: # TODO: verify
-        logging.info('Story was deleted.')
-        return False
-    elif r.get('dead', False):
-        logging.info('Story was deleted.')
-        return False
-    elif r.get('type', '') != 'link':
-        logging.info('Type "{}" is not "link".'.format(r.get('type', '')))
-        return False
-    s = {}
-    s['author'] = r.get('user', '')
-    s['author_link'] = SITE_AUTHOR_LINK(r.get('user', ''))
-    s['score'] = r.get('points', 0)
-    s['date'] = r.get('time', 0)
-    s['title'] = r.get('title', '')
-    s['link'] = SITE_LINK(ref)
-    s['url'] = r.get('url', '')
-    if s['url'].startswith('item'):
-        s['url'] = SITE_LINK(ref)
-    s['comments'] = [bhn_comment(i) for i in r['comments']]
-    s['comments'] = list(filter(bool, s['comments']))
-    s['num_comments'] = r.get('comments_count', 0)
-    if 'content' in r and r['content']:
-        s['text'] = clean(r['content'] or '')
-    return s
-def story(ref):
-    s = alg_story(ref)
-    if s is None:
-        s = bhn_story(ref)
-    if not s:
-        return False
-    if not s['title']:
-        return False
-    if s['score'] < 25 and s['num_comments'] < 10:
-        logging.info('Score ({}) or num comments ({}) below threshold.'.format(s['score'], s['num_comments']))
-        return False
-    return s
 # scratchpad so I can quickly develop the parser
 if __name__ == '__main__':
     print(feed())
     #print(story(20763961))
     #print(story(20802050))
-    #print(story(42899834)) # type "job"
-    #print(story(42900076)) # Ask HN
-    #print(story(42898201)) # Show HN
-    #print(story(42899703)) # normal
-    print(story(42902678)) # bad title?


@@ -1,120 +0,0 @@
import logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
if __name__ == '__main__':
import sys
sys.path.insert(0,'.')
import requests
from datetime import datetime
from utils import clean
API_HOTTEST = lambda x: 'https://lobste.rs/hottest.json'
API_ITEM = lambda x : 'https://lobste.rs/s/{}.json'.format(x)
SITE_LINK = lambda x : 'https://lobste.rs/s/{}'.format(x)
SITE_AUTHOR_LINK = lambda x : 'https://lobste.rs/u/{}'.format(x)
def api(route, ref=None):
try:
r = requests.get(route(ref), timeout=5)
if r.status_code != 200:
raise Exception('Bad response code ' + str(r.status_code))
return r.json()
except KeyboardInterrupt:
raise
except BaseException as e:
logging.error('Problem hitting lobsters API: {}, trying again'.format(str(e)))
try:
r = requests.get(route(ref), timeout=15)
if r.status_code != 200:
raise Exception('Bad response code ' + str(r.status_code))
return r.json()
except KeyboardInterrupt:
raise
except BaseException as e:
logging.error('Problem hitting lobsters API: {}'.format(str(e)))
return False
def feed():
return [x['short_id'] for x in api(API_HOTTEST) or []]
def unix(date_str):
date_str = date_str.replace(':', '')
return int(datetime.strptime(date_str, '%Y-%m-%dT%H%M%S.%f%z').timestamp())
def make_comment(i):
c = {}
try:
c['author'] = i['commenting_user']
except KeyError:
c['author'] = ''
c['score'] = i.get('score', 0)
try:
c['date'] = unix(i['created_at'])
except KeyError:
c['date'] = 0
c['text'] = clean(i.get('comment', '') or '')
c['comments'] = []
return c
def iter_comments(flat_comments):
nested_comments = []
parent_stack = []
for comment in flat_comments:
c = make_comment(comment)
indent = comment['depth']
if indent == 0:
nested_comments.append(c)
parent_stack = [c]
else:
parent_stack = parent_stack[:indent]
p = parent_stack[-1]
p['comments'].append(c)
parent_stack.append(c)
return nested_comments
def story(ref):
r = api(API_ITEM, ref)
if not r:
logging.info('Bad Lobsters API response.')
return False
s = {}
try:
s['author'] = r['submitter_user']
s['author_link'] = SITE_AUTHOR_LINK(s['author'])
except KeyError:
s['author'] = ''
s['author_link'] = ''
s['score'] = r.get('score', 0)
try:
s['date'] = unix(r['created_at'])
except KeyError:
s['date'] = 0
s['title'] = r.get('title', '')
s['link'] = SITE_LINK(ref)
s['url'] = r.get('url', '')
s['comments'] = iter_comments(r['comments'])
s['num_comments'] = r['comment_count']
if s['score'] < 15 and s['num_comments'] < 10:
logging.info('Score ({}) or num comments ({}) below threshold.'.format(s['score'], s['num_comments']))
return False
if 'description' in r and r['description']:
s['text'] = clean(r['description'] or '')
return s
# scratchpad so I can quickly develop the parser
if __name__ == '__main__':
#print(feed())
import json
print(json.dumps(story('fzvd1v'), indent=4))
#print(json.dumps(story('ixyv5u'), indent=4))


@@ -7,6 +7,8 @@ import requests
 import time
 from bs4 import BeautifulSoup
+import settings
 USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0'
 def api(route):
@@ -27,15 +29,13 @@ def api(route):
 def story(ref):
     html = api(ref)
-    if not html:
-        logging.info('Bad http GET response.')
-        return False
+    if not html: return False
     soup = BeautifulSoup(html, features='html.parser')
     s = {}
     s['author'] = 'manual submission'
-    s['author_link'] = 'https://news.t0.vc'
+    s['author_link'] = 'https://{}'.format(settings.HOSTNAME)
     s['score'] = 0
     s['date'] = int(time.time())
     s['title'] = str(soup.title.string) if soup.title else ref


@@ -32,8 +32,11 @@ def feed():
         return [x.id for x in reddit.subreddit(subs).hot()]
     except KeyboardInterrupt:
         raise
-    except BaseException as e:
-        logging.critical('Problem hitting reddit API: {}'.format(str(e)))
+    except PRAWException as e:
+        logging.error('Problem hitting reddit API: {}'.format(str(e)))
+        return []
+    except PrawcoreException as e:
+        logging.error('Problem hitting reddit API: {}'.format(str(e)))
         return []
 def comment(i):
@@ -56,9 +59,7 @@ def comment(i):
 def story(ref):
     try:
         r = reddit.submission(ref)
-        if not r:
-            logging.info('Bad Reddit API response.')
-            return False
+        if not r: return False
         s = {}
         s['author'] = r.author.name if r.author else '[Deleted]'
@@ -72,8 +73,7 @@ def story(ref):
         s['comments'] = list(filter(bool, s['comments']))
         s['num_comments'] = r.num_comments
-        if s['score'] < 25 and s['num_comments'] < 10:
-            logging.info('Score ({}) or num comments ({}) below threshold.'.format(s['score'], s['num_comments']))
+        if s['score'] < settings.REDDIT_SCORE_THRESHOLD and s['num_comments'] < settings.REDDIT_COMMENT_THRESHOLD:
             return False
         if r.selftext:
@@ -84,10 +84,10 @@ def story(ref):
     except KeyboardInterrupt:
         raise
     except PRAWException as e:
-        logging.critical('Problem hitting reddit API: {}'.format(str(e)))
+        logging.error('Problem hitting reddit API: {}'.format(str(e)))
         return False
     except PrawcoreException as e:
-        logging.critical('Problem hitting reddit API: {}'.format(str(e)))
+        logging.error('Problem hitting reddit API: {}'.format(str(e)))
         return False
 # scratchpad so I can quickly develop the parser


@@ -0,0 +1,99 @@
import logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
if __name__ == '__main__':
import sys
sys.path.insert(0,'.')
from datetime import datetime
from bs4 import BeautifulSoup
import settings
from utils import clean
from misc.time import unix
from misc.api import xml
from misc.news import Base
def _get_sitemap_date(a):
if a.find('lastmod'):
return a.find('lastmod').text
if a.find('news:publication_date'):
return a.find('news:publication_date').text
if a.find('ns2:publication_date'):
return a.find('ns2:publication_date').text
return ''
def _filter_links(links, excludes=None):
too_old = datetime.now().timestamp() - settings.MAX_STORY_AGE
links = list(filter(None, [a if _get_sitemap_date(a) else None for a in links]))
links = list(filter(None, [a if unix(_get_sitemap_date(a)) > too_old else None for a in links]))
links.sort(key=lambda a: unix(_get_sitemap_date(a)), reverse=True)
links = [x.find('loc').text for x in links] or []
links = list(set(links))
if excludes:
links = list(filter(None, [None if any(e in link for e in excludes) else link for link in links]))
return links
def _get_sitemap(feed_url, excludes=None):
markup = xml(lambda x: feed_url)
if not markup: return []
soup = BeautifulSoup(markup, features='lxml')
links = []
feed_urls = []
if soup.find('sitemapindex'):
sitemap = soup.find('sitemapindex').findAll('sitemap')
feed_urls = list(filter(None, [a if a.find('loc') else None for a in sitemap]))
if soup.find('urlset'):
sitemap = soup.find('urlset').findAll('url')
links = list(filter(None, [a if a.find('loc') else None for a in sitemap]))
feed_urls = _filter_links(feed_urls, excludes)
links = _filter_links(links, excludes)
for url in feed_urls:
links += _get_sitemap(url, excludes)
return list(set(links))
class Sitemap(Base):
def __init__(self, config):
self.config = config
self.sitemap_url = config.get('url')
self.tz = config.get('tz')
def feed(self, excludes=None):
links = []
if isinstance(self.sitemap_url, str):
links += _get_sitemap(self.sitemap_url, excludes)
elif isinstance(self.sitemap_url, list):
for url in self.sitemap_url:
links += _get_sitemap(url, excludes)
links = list(set(links))
return [(self.get_id(link), link) for link in links]
# scratchpad so I can quickly develop the parser
if __name__ == '__main__':
print("Sitemap: The Spinoff")
site = Sitemap("https://thespinoff.co.nz/sitemap.xml")
excludes = [
'thespinoff.co.nz/sitemap-misc.xml',
'thespinoff.co.nz/sitemap-authors.xml',
'thespinoff.co.nz/sitemap-tax-category.xml',
]
posts = site.feed(excludes)
print(posts[:5])
print(site.story(posts[0]))
print("Sitemap: Newshub")
site = Sitemap([
'https://www.newshub.co.nz/home/politics.gnewssitemap.xml',
'https://www.newshub.co.nz/home/new-zealand.gnewssitemap.xml',
'https://www.newshub.co.nz/home/world.gnewssitemap.xml',
'https://www.newshub.co.nz/home/money.gnewssitemap.xml',
])
posts = site.feed()
print(posts[:5])
print(site.story(posts[0]))
print(site.story(posts[:-1]))

apiserver/feeds/substack.py (new file)

@@ -0,0 +1,165 @@
import logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
if __name__ == '__main__':
import sys
sys.path.insert(0,'.')
import requests
from datetime import datetime
from utils import clean
SUBSTACK_REFERER = 'https://substack.com'
SUBSTACK_API_TOP_POSTS = lambda x: "https://substack.com/api/v1/reader/top-posts"
def author_link(author_id, base_url):
return f"{base_url}/people/{author_id}"
def api_comments(post_id, base_url):
return f"{base_url}/api/v1/post/{post_id}/comments?all_comments=true&sort=best_first"
def api_stories(x, base_url):
return f"{base_url}/api/v1/archive?sort=new&search=&offset=0&limit=100"
def unix(date_str):
return int(datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S.%fZ').timestamp())
def api(route, ref=None, referer=None):
headers = {'Referer': referer} if referer else None
try:
r = requests.get(route(ref), headers=headers, timeout=10)
if r.status_code != 200:
raise Exception('Bad response code ' + str(r.status_code))
return r.json()
except KeyboardInterrupt:
raise
except BaseException as e:
logging.error('Problem hitting Substack API: {}, trying again'.format(str(e)))
try:
r = requests.get(route(ref), headers=headers, timeout=20)
if r.status_code != 200:
raise Exception('Bad response code ' + str(r.status_code))
return r.json()
except KeyboardInterrupt:
raise
except BaseException as e:
logging.error('Problem hitting Substack API: {}'.format(str(e)))
return False
def comment(i):
if 'body' not in i:
return False
c = {}
c['date'] = unix(i.get('date'))
c['author'] = i.get('name', '')
c['score'] = i.get('reactions').get('')
c['text'] = clean(i.get('body', '') or '')
c['comments'] = [comment(j) for j in i['children']]
c['comments'] = list(filter(bool, c['comments']))
return c
class Publication:
def __init__(self, domain):
self.BASE_DOMAIN = domain
def feed(self):
stories = api(lambda x: api_stories(x, self.BASE_DOMAIN), referer=self.BASE_DOMAIN)
if not stories: return []
stories = list(filter(None, [i if i.get("audience") == "everyone" else None for i in stories]))
return [str(i.get("id")) for i in stories or []]
def story(self, ref):
stories = api(lambda x: api_stories(x, self.BASE_DOMAIN), referer=self.BASE_DOMAIN)
if not stories: return False
stories = list(filter(None, [i if i.get("audience") == "everyone" else None for i in stories]))
stories = list(filter(None, [i if str(i.get('id')) == ref else None for i in stories]))
if len(stories) == 0:
return False
r = stories[0]
if not r:
return False
s = {}
s['author'] = ''
s['author_link'] = ''
s['date'] = unix(r.get('post_date'))
s['score'] = r.get('reactions').get('')
s['title'] = r.get('title', '')
s['link'] = r.get('canonical_url', '')
s['url'] = r.get('canonical_url', '')
comments = api(lambda x: api_comments(x, self.BASE_DOMAIN), r.get('id'), referer=self.BASE_DOMAIN)
s['comments'] = [comment(i) for i in comments.get('comments')]
s['comments'] = list(filter(bool, s['comments']))
s['num_comments'] = r.get('comment_count', 0)
authors = list(filter(None, [self._bylines(byline) for byline in r.get('publishedBylines')]))
if len(authors):
s['author'] = authors[0].get('name')
s['author_link'] = authors[0].get('link')
return s
def _bylines(self, b):
if 'id' not in b:
return None
a = {}
a['name'] = b.get('name')
a['link'] = author_link(b.get('id'), self.BASE_DOMAIN)
return a
class Top:
def feed(self):
stories = api(SUBSTACK_API_TOP_POSTS, referer=SUBSTACK_REFERER)
if not stories: return []
stories = list(filter(None, [i if i.get("audience") == "everyone" else None for i in stories]))
return [str(i.get("id")) for i in stories or []]
def story(self, ref):
stories = api(SUBSTACK_API_TOP_POSTS, referer=SUBSTACK_REFERER)
if not stories: return False
stories = list(filter(None, [i if i.get("audience") == "everyone" else None for i in stories]))
stories = list(filter(None, [i if str(i.get('id')) == ref else None for i in stories]))
if len(stories) == 0:
return False
r = stories[0]
if not r:
return False
s = {}
pub = r.get('pub')
base_url = pub.get('base_url')
s['author'] = pub.get('author_name')
s['author_link'] = author_link(pub.get('author_id'), base_url)
s['date'] = unix(r.get('post_date'))
s['score'] = r.get('score')
s['title'] = r.get('title', '')
s['link'] = r.get('canonical_url', '')
s['url'] = r.get('canonical_url', '')
comments = api(lambda x: api_comments(x, base_url), r.get('id'), referer=SUBSTACK_REFERER)
s['comments'] = [comment(i) for i in comments.get('comments')]
s['comments'] = list(filter(bool, s['comments']))
s['num_comments'] = r.get('comment_count', 0)
return s
top = Top()
# scratchpad so I can quickly develop the parser
if __name__ == '__main__':
top_posts = top.feed()
print(top.story(top_posts[0]))
webworm = Publication("https://www.webworm.co/")
posts = webworm.feed()
print(webworm.story(posts[0]))


@@ -34,7 +34,7 @@ def api(route):
     except KeyboardInterrupt:
         raise
     except BaseException as e:
-        logging.critical('Problem hitting tildes website: {}'.format(str(e)))
+        logging.error('Problem hitting tildes website: {}'.format(str(e)))
         return False
 def feed():
@@ -71,15 +71,11 @@ def story(ref):
         html = api(SITE_LINK(group_lookup[ref], ref))
     else:
         html = api(API_ITEM(ref))
-    if not html:
-        logging.info('Bad Tildes API response.')
-        return False
+    if not html: return False
     soup = BeautifulSoup(html, features='html.parser')
     a = soup.find('article', class_='topic-full')
-    if a is None:
-        logging.info('Tildes <article> element not found.')
-        return False
+    if a is None: return False
     h = a.find('header')
     lu = h.find('a', class_='link-user')
@@ -87,7 +83,6 @@ def story(ref):
     error = a.find('div', class_='text-error')
     if error:
         if 'deleted' in error.string or 'removed' in error.string:
-            logging.info('Article was deleted or removed.')
             return False
     s = {}
@@ -107,21 +102,7 @@ def story(ref):
     ch = a.find('header', class_='topic-comments-header')
     s['num_comments'] = int(ch.h2.string.split(' ')[0]) if ch else 0
-    if s['group'].split('.')[0] not in [
-            '~arts',
-            '~comp',
-            '~creative',
-            '~design',
-            '~engineering',
-            '~finance',
-            '~science',
-            '~tech',
-            ]:
-        logging.info('Group ({}) not in whitelist.'.format(s['group']))
-        return False
-    if s['score'] < 15 and s['num_comments'] < 10:
-        logging.info('Score ({}) or num comments ({}) below threshold.'.format(s['score'], s['num_comments']))
+    if s['score'] < 8 and s['num_comments'] < 6:
         return False
     td = a.find('div', class_='topic-full-text')
@@ -132,7 +113,7 @@ def story(ref):
 # scratchpad so I can quickly develop the parser
 if __name__ == '__main__':
-    print(feed())
+    #print(feed())
     #normal = story('gxt')
     #print(normal)
     #no_comments = story('gxr')
@@ -141,8 +122,8 @@ if __name__ == '__main__':
     #print(self_post)
     #li_comment = story('gqx')
     #print(li_comment)
-    #broken = story('q4y')
-    #print(broken)
+    broken = story('q4y')
+    print(broken)
     # make sure there's no self-reference
     #import copy

apiserver/misc/api.py (new file)

@@ -0,0 +1,35 @@
import logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
import requests
USER_AGENT = "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
FORWARD_IP = '66.249.66.1'
def xml(route, ref=None):
try:
headers = {'User-Agent': USER_AGENT, 'X-Forwarded-For': FORWARD_IP}
r = requests.get(route(ref), headers=headers, timeout=5)
if r.status_code != 200:
raise Exception('Bad response code ' + str(r.status_code))
return r.text
except KeyboardInterrupt:
raise
except BaseException as e:
logging.error('Problem hitting URL: {}'.format(str(e)))
return False
def json(route, ref=None):
try:
headers = {'User-Agent': USER_AGENT, 'X-Forwarded-For': FORWARD_IP}
r = requests.get(route(ref), headers=headers, timeout=5)
if r.status_code != 200:
raise Exception('Bad response code ' + str(r.status_code))
return r.json()
except KeyboardInterrupt:
raise
except BaseException as e:
logging.error('Problem hitting URL: {}'.format(str(e)))
return False


@@ -0,0 +1,69 @@
def parse_extruct(s, data):
rdfa_keys = {
'title': [
'http://ogp.me/ns#title',
'https://ogp.me/ns#title',
],
'date': [
'http://ogp.me/ns/article#modified_time',
'https://ogp.me/ns/article#modified_time',
'http://ogp.me/ns/article#published_time',
'https://ogp.me/ns/article#published_time',
]
}
for rdfa in data['rdfa']:
for key, props in rdfa.items():
for attribute, properties in rdfa_keys.items():
for prop in properties:
if prop in props:
for values in props[prop]:
s[attribute] = values['@value']
for og in data['opengraph']:
titles = list(filter(None, [value if 'og:title' in key else None for key, value in og['properties']]))
modified = list(filter(None, [value if 'article:modified_time' in key else None for key, value in og['properties']]))
published = list(filter(None, [value if 'article:published_time' in key else None for key, value in og['properties']]))
if len(modified):
s['date'] = modified[0]
if len(published):
s['date'] = published[0]
if len(titles):
s['title'] = titles[0]
for md in data['microdata']:
if md['type'] in ['https://schema.org/NewsArticle', 'http://schema.org/NewsArticle']:
props = md['properties']
s['title'] = props['headline']
if props['dateModified']:
s['date'] = props['dateModified']
if props['datePublished']:
s['date'] = props['datePublished']
if 'author' in props and props['author']:
if 'properties' in props['author']:
s['author'] = props['author']['properties']['name']
elif isinstance(props['author'], list):
s['author'] = props['author'][0]['properties']['name']
for ld in data['json-ld']:
if '@type' in ld and ld['@type'] in ['Article', 'NewsArticle']:
s['title'] = ld['headline']
if ld['dateModified']:
s['date'] = ld['dateModified']
if ld['datePublished']:
s['date'] = ld['datePublished']
if 'author' in ld and ld['author']:
if 'name' in ld['author']:
s['author'] = ld['author']['name']
elif isinstance(ld['author'], list):
s['author'] = ld['author'][0]['name']
if '@graph' in ld:
for gld in ld['@graph']:
if '@type' in gld and gld['@type'] in ['Article', 'NewsArticle']:
s['title'] = gld['headline']
if gld['dateModified']:
s['date'] = gld['dateModified']
if gld['datePublished']:
s['date'] = gld['datePublished']
return s

apiserver/misc/news.py (new file)

@@ -0,0 +1,101 @@
import logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
import re
import requests
from bs4 import BeautifulSoup
from scrapers import declutter
import extruct
import settings
from utils import clean
from misc.metadata import parse_extruct
from misc.time import unix
from misc.api import xml
def comment(i):
if 'author' not in i:
return False
c = {}
c['author'] = i.get('author', '')
c['score'] = i.get('points', 0)
c['date'] = unix(i.get('date', 0))
c['text'] = clean(i.get('text', '') or '')
c['comments'] = [comment(j) for j in i['children']]
c['comments'] = list(filter(bool, c['comments']))
return c
def comment_count(i):
alive = 1 if i['author'] else 0
return sum([comment_count(c) for c in i['comments']]) + alive
class Base:
def __init__(config):
self.config = config
self.url = config.get('url')
self.tz = config.get('tz')
def get_id(self, link):
patterns = self.config.get('patterns')
if not patterns:
return link
patterns = [re.compile(p) for p in patterns]
patterns = list(filter(None, [p.match(link) for p in patterns]))
patterns = list(set([':'.join(p.groups()) for p in patterns]))
if not patterns:
return link
return patterns[0]
def feed(self, excludes=None):
return []
def story(self, ref, urlref):
if urlref is None:
return False
markup = xml(lambda x: urlref)
if not markup:
return False
s = {}
s['author_link'] = ''
s['score'] = 0
s['comments'] = []
s['num_comments'] = 0
s['link'] = urlref
s['url'] = urlref
s['date'] = 0
soup = BeautifulSoup(markup, features='html.parser')
icon32 = soup.find_all('link', rel="icon", href=True, sizes="32x32")
icon16 = soup.find_all('link', rel="icon", href=True, sizes="16x16")
favicon = soup.find_all('link', rel="shortcut icon", href=True)
others = soup.find_all('link', rel="icon", href=True)
icons = icon32 + icon16 + favicon + others
base_url = '/'.join(urlref.split('/')[:3])
icons = list(set([i.get('href') for i in icons]))
icons = [i if i.startswith('http') else base_url + i for i in icons]
if icons:
s['icon'] = icons[0]
data = extruct.extract(markup)
s = parse_extruct(s, data)
if s['date']:
s['date'] = unix(s['date'], tz=self.tz)
if 'disqus' in markup:
try:
s['comments'] = declutter.get_comments(urlref)
c['comments'] = list(filter(bool, c['comments']))
s['num_comments'] = comment_count(s['comments'])
except KeyboardInterrupt:
raise
except:
pass
if not s['date']:
return False
return s

apiserver/misc/time.py (new file)

@@ -0,0 +1,18 @@
import pytz
import dateutil.parser
TZINFOS = {
'NZDT': pytz.timezone('Pacific/Auckland'),
'NZST': pytz.timezone('Pacific/Auckland')
}
def unix(date_str, tz=None, tzinfos=TZINFOS):
try:
dt = dateutil.parser.parse(date_str, tzinfos=tzinfos)
if tz:
dt = pytz.timezone(tz).localize(dt)
return int(dt.timestamp())
except:
pass
return 0
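
As a quick illustration of how the helper above behaves (the input strings are hypothetical, not values from the diff): a sitemap `lastmod` string carrying an NZDT/NZST suffix resolves through the `tzinfos` map, while a naive timestamp can be localized with the `tz` argument, which is how `misc/news.py` calls `unix(s['date'], tz=self.tz)`.

```python
# Hypothetical usage of misc/time.py's unix() helper; values are illustrative only.
from misc.time import unix

# A named-zone suffix is resolved by dateutil via the TZINFOS map above.
print(unix('2020-11-05 16:57:08 NZDT'))

# A naive datetime string can be localized explicitly, as Base.story() does for per-site tz settings.
print(unix('2020-11-05T16:57:08', tz='Pacific/Auckland'))

# Unparseable input falls back to 0 rather than raising.
print(unix('not a date'))
```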


@@ -4,19 +4,21 @@ certifi==2020.6.20
 chardet==3.0.4
 click==7.1.2
 commonmark==0.9.1
+extruct==0.10.0
 Flask==1.1.2
 Flask-Cors==3.0.8
 gevent==20.6.2
 greenlet==0.4.16
-humanize==4.10.0
 idna==2.10
 itsdangerous==1.1.0
 Jinja2==2.11.2
+lxml==4.6.1
 MarkupSafe==1.1.1
 packaging==20.4
 praw==6.4.0
 prawcore==1.4.0
 pyparsing==2.4.7
+pytz==2020.4
 requests==2.24.0
 six==1.15.0
 soupsieve==2.0.1
@@ -28,3 +30,4 @@ websocket-client==0.57.0
 Werkzeug==1.0.1
 zope.event==4.4
 zope.interface==5.1.0
+python-dateutil==2.8.1


@@ -0,0 +1,41 @@
import logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
import requests
DECLUTTER_API = 'https://declutter.1j.nz/details'
DECLUTTER_COMMENT_API = 'https://declutter.1j.nz/comments'
TIMEOUT = 30
def get_html(url):
logging.info(f"Declutter Scraper: {url}")
details = get_details(url)
if not details:
return ''
return details['content']
def get_details(url):
try:
r = requests.post(DECLUTTER_API, data=dict(url=url), timeout=TIMEOUT)
if r.status_code != 200:
raise Exception('Bad response code ' + str(r.status_code))
return r.json()
except KeyboardInterrupt:
raise
except BaseException as e:
logging.error('Problem decluttering article: {}'.format(str(e)))
return None
def get_comments(url):
try:
r = requests.post(DECLUTTER_COMMENT_API, data=dict(url=url), timeout=TIMEOUT)
if r.status_code != 200:
raise Exception('Bad response code ' + str(r.status_code))
return r.json()
except KeyboardInterrupt:
raise
except BaseException as e:
logging.error('Problem getting comments for article: {}'.format(str(e)))
return None


@@ -0,0 +1,42 @@
import logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
import requests
from settings import READER_PORT
READ_API = 'http://127.0.0.1:{}/headless/details'.format(READER_PORT or 3000)
READ_COMMENT__API = 'http://127.0.0.1:{}/headless/comments'.format(READER_PORT or 3000)
TIMEOUT = 60
def get_html(url):
logging.info(f"Headless Browser Scraper: {url}")
details = get_details(url)
if not details:
return ''
return details['content']
def get_details(url):
try:
r = requests.post(READ_API, data=dict(url=url), timeout=TIMEOUT)
if r.status_code != 200:
raise Exception('Bad response code ' + str(r.status_code))
return r.json()
except KeyboardInterrupt:
raise
except BaseException as e:
logging.error('Problem scraping article: {}'.format(str(e)))
return None
def get_comments(url):
try:
r = requests.post(READ_COMMENT_API, data=dict(url=url), timeout=TIMEOUT)
if r.status_code != 200:
raise Exception('Bad response code ' + str(r.status_code))
return r.json()
except KeyboardInterrupt:
raise
except BaseException as e:
logging.error('Problem getting comments for article: {}'.format(str(e)))
return None


@@ -0,0 +1,37 @@
import logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
import requests
OUTLINE_REFERER = 'https://outline.com/'
OUTLINE_API = 'https://api.outline.com/v3/parse_article'
TIMEOUT = 20
def get_html(url):
details = get_details(url)
if not details:
return ''
return details['html']
def get_details(url):
try:
logging.info(f"Outline Scraper: {url}")
params = {'source_url': url}
headers = {'Referer': OUTLINE_REFERER}
r = requests.get(OUTLINE_API, params=params, headers=headers, timeout=TIMEOUT)
if r.status_code == 429:
logging.info('Rate limited by outline, sleeping 30s and skipping...')
time.sleep(30)
return None
if r.status_code != 200:
raise Exception('Bad response code ' + str(r.status_code))
data = r.json()['data']
if 'URL is not supported by Outline' in data['html']:
raise Exception('URL not supported by Outline')
return data
except KeyboardInterrupt:
raise
except BaseException as e:
logging.error('Problem outlining article: {}'.format(str(e)))
return None


@@ -0,0 +1,28 @@
import logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
import requests
from settings import READER_PORT
READ_API = 'http://127.0.0.1:{}/simple/details'.format(READER_PORT or 3000)
TIMEOUT = 20
def get_html(url):
logging.info(f"Simple Scraper: {url}")
details = get_details(url)
if not details:
return ''
return details['content']
def get_details(url):
try:
r = requests.post(READ_API, data=dict(url=url), timeout=TIMEOUT)
if r.status_code != 200:
raise Exception('Bad response code ' + str(r.status_code))
return r.json()
except KeyboardInterrupt:
raise
except BaseException as e:
logging.error('Problem getting article: {}'.format(str(e)))
return None
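The four scraper modules added above (declutter, headless browser, outline, simple) all expose the same get_html(url) entry point, and the new SCRAPERS setting further down lists them in priority order. A minimal dispatcher along these lines could fall back through them until one returns content; this is a sketch only, and the module names and the SCRAPER_MODULES mapping are illustrative rather than code from this change:
import settings
# Assumed module layout; adjust the imports to wherever the scraper files live.
from scrapers import declutter, headless, outline, simple
SCRAPER_MODULES = {
    'declutter': declutter,
    'headless': headless,
    'outline': outline,
    'simple': simple,
}
def get_html(url):
    # Try each configured scraper in order until one returns non-empty HTML.
    for name in settings.SCRAPERS:
        module = SCRAPER_MODULES.get(name)
        if not module:
            continue
        html = module.get_html(url)
        if html:
            return html
    return ''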

View File

@@ -1,58 +0,0 @@
import time
import json
import logging
import feed
import database
import search
database.init()
def fix_gzip_bug(story_list):
FIX_THRESHOLD = 150
count = 1
for sid in story_list:
try:
sid = sid[0]
story = database.get_story(sid)
full_json = json.loads(story.full_json)
meta_json = json.loads(story.meta_json)
text = full_json.get('text', '')
count = text.count('\ufffd') # U+FFFD replacement character
if not count: continue
ratio = count / len(text) * 1000
print('Bad story:', sid, 'Num ?:', count, 'Ratio:', ratio)
if ratio < FIX_THRESHOLD: continue
print('Attempting to fix...')
valid = feed.update_story(meta_json, is_manual=True)
if valid:
database.put_story(meta_json)
search.put_story(meta_json)
print('Success')
else:
print('Story was not valid')
time.sleep(3)
except KeyboardInterrupt:
raise
except BaseException as e:
logging.exception(e)
breakpoint()
if __name__ == '__main__':
num_stories = database.count_stories()
print('Fix {} stories?'.format(num_stories))
print('Press ENTER to continue, ctrl-c to cancel')
input()
story_list = database.get_story_list()
fix_gzip_bug(story_list)

View File

@@ -1,62 +0,0 @@
import logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
import database
from sqlalchemy import select
import search
import sys
import time
import json
import requests
database.init()
search.init()
BATCH_SIZE = 5000
def put_stories(stories):
return search.meili_api(requests.post, 'indexes/qotnews/documents', stories)
def get_update(update_id):
return search.meili_api(requests.get, 'tasks/{}'.format(update_id))
if __name__ == '__main__':
num_stories = database.count_stories()
print('Reindex {} stories?'.format(num_stories))
print('Press ENTER to continue, ctrl-c to cancel')
input()
story_list = database.get_story_list()
count = 1
while len(story_list):
stories = []
for _ in range(BATCH_SIZE):
try:
sid = story_list.pop()
except IndexError:
break
story = database.get_story(sid)
print('Indexing {}/{} id: {} title: {}'.format(count, num_stories, sid[0], story.title))
story_obj = json.loads(story.meta_json)
stories.append(story_obj)
count += 1
res = put_stories(stories)
update_id = res['uid']
print('Waiting for processing', end='')
while get_update(update_id)['status'] != 'succeeded':
time.sleep(0.5)
print('.', end='', flush=True)
print()
print('Done.')

View File

@@ -1,23 +0,0 @@
import time
import requests
def test_search_api():
num_tests = 100
total_time = 0
for i in range(num_tests):
start = time.time()
res = requests.get('http://127.0.0.1:33842/api/search?q=iphone')
res.raise_for_status()
duration = time.time() - start
total_time += duration
avg_time = total_time / num_tests
print('Average search time:', avg_time)
if __name__ == '__main__':
test_search_api()

View File

@@ -4,62 +4,83 @@ logging.basicConfig(
level=logging.DEBUG)
import requests
-import settings
-SEARCH_ENABLED = bool(settings.MEILI_URL)
-def meili_api(method, route, json=None, params=None, parse_json=True):
+MEILI_URL = 'http://127.0.0.1:7700/'
+def create_index():
try:
-r = method(settings.MEILI_URL + route, json=json, params=params, timeout=4)
-if r.status_code > 299:
+json = dict(name='qotnews', uid='qotnews')
+r = requests.post(MEILI_URL + 'indexes', json=json, timeout=2)
+if r.status_code != 201:
raise Exception('Bad response code ' + str(r.status_code))
-if parse_json:
-return r.json()
-else:
-r.encoding = 'utf-8'
-return r.text
+return r.json()
except KeyboardInterrupt:
raise
except BaseException as e:
-logging.error('Problem with MeiliSearch api route: %s: %s', route, str(e))
+logging.error('Problem creating MeiliSearch index: {}'.format(str(e)))
return False
-def create_index():
-json = dict(uid='qotnews', primaryKey='id')
-return meili_api(requests.post, 'indexes', json=json)
def update_rankings():
-json = ['typo', 'words', 'proximity', 'date:desc', 'exactness']
-return meili_api(requests.post, 'indexes/qotnews/settings/ranking-rules', json=json)
+try:
+json = ['typo', 'words', 'proximity', 'attribute', 'desc(date)', 'wordsPosition', 'exactness']
+r = requests.post(MEILI_URL + 'indexes/qotnews/settings/ranking-rules', json=json, timeout=2)
+if r.status_code != 202:
+raise Exception('Bad response code ' + str(r.status_code))
+return r.json()
+except KeyboardInterrupt:
+raise
+except BaseException as e:
+logging.error('Problem setting MeiliSearch ranking rules: {}'.format(str(e)))
+return False
def update_attributes():
-json = ['title', 'url', 'author']
-r = meili_api(requests.post, 'indexes/qotnews/settings/searchable-attributes', json=json)
-json = ['id', 'ref', 'source', 'author', 'author_link', 'score', 'date', 'title', 'link', 'url', 'num_comments']
-r = meili_api(requests.post, 'indexes/qotnews/settings/displayed-attributes', json=json)
-return r
+try:
+json = ['title', 'url', 'author', 'link', 'id', 'source']
+r = requests.post(MEILI_URL + 'indexes/qotnews/settings/searchable-attributes', json=json, timeout=2)
+if r.status_code != 202:
+raise Exception('Bad response code ' + str(r.status_code))
+requests.delete(MEILI_URL + 'indexes/qotnews/settings/displayed-attributes', timeout=2)
+return r.json()
+except KeyboardInterrupt:
+raise
+except BaseException as e:
+logging.error('Problem setting MeiliSearch searchable attributes: {}'.format(str(e)))
+return False
def init():
-if not SEARCH_ENABLED:
-logging.info('Search is not enabled, skipping init.')
-return
-print(create_index())
+create_index()
update_rankings()
update_attributes()
def put_story(story):
-if not SEARCH_ENABLED: return
-return meili_api(requests.post, 'indexes/qotnews/documents', [story])
+story = story.copy()
+story.pop('text', None)
+story.pop('comments', None)
+try:
+r = requests.post(MEILI_URL + 'indexes/qotnews/documents', json=[story], timeout=2)
+if r.status_code != 202:
+raise Exception('Bad response code ' + str(r.status_code))
+return r.json()
+except KeyboardInterrupt:
+raise
+except BaseException as e:
+logging.error('Problem putting MeiliSearch story: {}'.format(str(e)))
+return False
def search(q):
-if not SEARCH_ENABLED: return []
-params = dict(q=q, limit=settings.FEED_LENGTH)
-r = meili_api(requests.get, 'indexes/qotnews/search', params=params, parse_json=False)
-return r
+try:
+params = dict(q=q, limit=250)
+r = requests.get(MEILI_URL + 'indexes/qotnews/search', params=params, timeout=2)
+if r.status_code != 200:
+raise Exception('Bad response code ' + str(r.status_code))
+return r.json()['hits']
+except KeyboardInterrupt:
+raise
+except BaseException as e:
+logging.error('Problem searching MeiliSearch: {}'.format(str(e)))
+return False
if __name__ == '__main__':
-init()
-print(update_rankings())
-print(search('facebook'))
+create_index()
+print(search('the'))
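As a quick sanity check of the rewritten functions on the + side, the module can be driven directly against a local MeiliSearch instance. A minimal sketch, assuming MeiliSearch is listening on 127.0.0.1:7700 and the file is importable as search; the sample story dict is illustrative only:
import search
search.init()  # create_index(), update_rankings(), update_attributes()
story = {'id': 'ABCD', 'title': 'Example story', 'url': 'https://example.com/story',
         'date': 1605000000, 'text': 'dropped from the indexed copy by put_story()'}
print(search.put_story(story))   # returns the MeiliSearch update JSON, or False on error
print(search.search('example'))  # returns the 'hits' list, or False on error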

View File

@@ -1,8 +1,7 @@
import os, logging import logging
DEBUG = os.environ.get('DEBUG')
logging.basicConfig( logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG if DEBUG else logging.INFO) level=logging.INFO)
import gevent import gevent
from gevent import monkey from gevent import monkey
@@ -14,146 +13,53 @@ import json
import threading import threading
import traceback import traceback
import time import time
import datetime
import humanize
import urllib.request
from urllib.parse import urlparse, parse_qs from urllib.parse import urlparse, parse_qs
import settings import settings
import database import database
import search import search
import feed import feed
from utils import gen_rand_id, NUM_ID_CHARS from utils import gen_rand_id
from flask import abort, Flask, request, render_template, stream_with_context, Response from flask import abort, Flask, request, render_template, stream_with_context, Response
from werkzeug.exceptions import NotFound from werkzeug.exceptions import NotFound
from flask_cors import CORS from flask_cors import CORS
smallweb_set = set()
def load_smallweb_list():
EXCLUDED = [
'github.com',
]
global smallweb_set
try:
url = 'https://raw.githubusercontent.com/kagisearch/smallweb/refs/heads/main/smallweb.txt'
with urllib.request.urlopen(url, timeout=10) as response:
urls = response.read().decode('utf-8').splitlines()
hosts = {urlparse(u).hostname for u in urls if u and urlparse(u).hostname}
smallweb_set = {h.replace('www.', '') for h in hosts if h not in EXCLUDED}
logging.info('Loaded {} smallweb domains.'.format(len(smallweb_set)))
except Exception as e:
logging.error('Failed to load smallweb list: {}'.format(e))
load_smallweb_list()
database.init() database.init()
search.init() search.init()
news_index = 0
ref_list = []
current_item = {}
def new_id(): def new_id():
nid = gen_rand_id() nid = gen_rand_id()
while database.get_story(nid): while database.get_story(nid):
nid = gen_rand_id() nid = gen_rand_id()
return nid return nid
build_folder = '../webclient/build'
def fromnow(ts):
return humanize.naturaltime(datetime.datetime.fromtimestamp(ts))
build_folder = './build'
flask_app = Flask(__name__, template_folder=build_folder, static_folder=build_folder, static_url_path='') flask_app = Flask(__name__, template_folder=build_folder, static_folder=build_folder, static_url_path='')
flask_app.jinja_env.filters['fromnow'] = fromnow
cors = CORS(flask_app) cors = CORS(flask_app)
@flask_app.route('/api') @flask_app.route('/api')
def api(): def api():
skip = request.args.get('skip', 0) stories = database.get_stories(settings.MAX_STORY_AGE)
limit = request.args.get('limit', settings.FEED_LENGTH) res = Response(json.dumps({"stories": stories}))
if request.args.get('smallweb') == 'true' and smallweb_set:
limit = int(limit)
skip = int(skip)
filtered_stories = []
current_skip = skip
while len(filtered_stories) < limit:
stories_batch = database.get_stories(limit, current_skip)
if not stories_batch:
break
for story_str in stories_batch:
story = json.loads(story_str)
story_url = story.get('url') or story.get('link') or ''
if not story_url:
continue
hostname = urlparse(story_url).hostname
if hostname:
hostname = hostname.replace('www.', '')
if hostname in smallweb_set:
filtered_stories.append(story_str)
if len(filtered_stories) == limit:
break
if len(filtered_stories) == limit:
break
current_skip += limit
stories = filtered_stories
else:
stories = database.get_stories(limit, skip)
# hacky nested json
res = Response('{"stories":[' + ','.join(stories) + ']}')
res.headers['content-type'] = 'application/json' res.headers['content-type'] = 'application/json'
return res return res
@flask_app.route('/api/stats', strict_slashes=False)
def apistats():
stats = {
'news_index': news_index,
'ref_list': ref_list,
'len_ref_list': len(ref_list),
'current_item': current_item,
'total_stories': database.count_stories(),
'id_space': 26**NUM_ID_CHARS,
}
return stats
@flask_app.route('/api/search', strict_slashes=False) @flask_app.route('/api/search', strict_slashes=False)
def apisearch(): def apisearch():
q = request.args.get('q', '') q = request.args.get('q', '')
if len(q) >= 3: if len(q) >= 3:
results = search.search(q) results = search.search(q)
else: else:
results = '[]' results = []
res = Response(results) return dict(results=results)
res.headers['content-type'] = 'application/json'
return res
@flask_app.route('/api/submit', methods=['POST'], strict_slashes=False) @flask_app.route('/api/submit', methods=['POST'], strict_slashes=False)
def submit(): def submit():
try: try:
url = request.form['url'] url = request.form['url']
for prefix in ['http://', 'https://']:
if url.lower().startswith(prefix):
break
else: # for
url = 'http://' + url
nid = new_id() nid = new_id()
logging.info('Manual submission: ' + url)
parse = urlparse(url) parse = urlparse(url)
if 'news.ycombinator.com' in parse.hostname: if 'news.ycombinator.com' in parse.hostname:
source = 'hackernews' source = 'hackernews'
@@ -161,24 +67,16 @@ def submit():
elif 'tildes.net' in parse.hostname and '~' in url: elif 'tildes.net' in parse.hostname and '~' in url:
source = 'tildes' source = 'tildes'
ref = parse.path.split('/')[2] ref = parse.path.split('/')[2]
elif 'lobste.rs' in parse.hostname and '/s/' in url:
source = 'lobsters'
ref = parse.path.split('/')[2]
elif 'reddit.com' in parse.hostname and 'comments' in url: elif 'reddit.com' in parse.hostname and 'comments' in url:
source = 'reddit' source = 'reddit'
ref = parse.path.split('/')[4] ref = parse.path.split('/')[4]
elif 'news.t0.vc' in parse.hostname: elif settings.HOSTNAME in parse.hostname:
raise Exception('Invalid article') raise Exception('Invalid article')
else: else:
source = 'manual' source = 'manual'
ref = url ref = url
existing = database.get_story_by_ref(ref) existing = database.get_story_by_ref(ref)
if existing and DEBUG:
ref = ref + '#' + str(time.time())
existing = False
if existing: if existing:
return {'nid': existing.sid} return {'nid': existing.sid}
else: else:
@@ -187,28 +85,23 @@ def submit():
if valid: if valid:
database.put_story(story) database.put_story(story)
search.put_story(story) search.put_story(story)
if DEBUG:
logging.info('Adding manual ref: {}, id: {}, source: {}'.format(ref, nid, source))
database.put_ref(ref, nid, source)
return {'nid': nid} return {'nid': nid}
else: else:
raise Exception('Invalid article') raise Exception('Invalid article')
except Exception as e: except BaseException as e:
msg = 'Problem with article submission: {} - {}'.format(e.__class__.__name__, str(e)) logging.error('Problem with article submission: {} - {}'.format(e.__class__.__name__, str(e)))
logging.error(msg)
print(traceback.format_exc()) print(traceback.format_exc())
return {'error': msg.split('\n')[0]}, 400 abort(400)
@flask_app.route('/api/<sid>') @flask_app.route('/api/<sid>')
def story(sid): def story(sid):
story = database.get_story(sid) story = database.get_story(sid)
if story: if story:
# hacky nested json related = database.get_stories_by_url(story.meta['url'])
res = Response('{"story":' + story.full_json + '}') related = [r.meta for r in related]
res = Response(json.dumps({"story": story.data, "related": related}))
res.headers['content-type'] = 'application/json' res.headers['content-type'] = 'application/json'
return res return res
else: else:
@@ -217,19 +110,10 @@ def story(sid):
@flask_app.route('/') @flask_app.route('/')
@flask_app.route('/search') @flask_app.route('/search')
def index(): def index():
stories_json = database.get_stories(settings.FEED_LENGTH, 0)
stories = [json.loads(s) for s in stories_json]
for s in stories:
url = urlparse(s.get('url') or s.get('link') or '').hostname or ''
s['hostname'] = url.replace('www.', '')
return render_template('index.html', return render_template('index.html',
title='QotNews', title='Feed',
url='news.t0.vc', url=settings.HOSTNAME,
description='Hacker News, Reddit, Lobsters, and Tildes articles rendered in reader mode', description='Reddit, Hacker News, and Tildes combined, then pre-rendered in reader mode')
robots='index',
stories=stories,
)
@flask_app.route('/<sid>', strict_slashes=False) @flask_app.route('/<sid>', strict_slashes=False)
@flask_app.route('/<sid>/c', strict_slashes=False) @flask_app.route('/<sid>/c', strict_slashes=False)
@@ -239,9 +123,9 @@ def static_story(sid):
except NotFound: except NotFound:
pass pass
story_obj = database.get_story(sid) story = database.get_story(sid)
if not story_obj: return abort(404) if not story: return abort(404)
story = json.loads(story_obj.full_json) story = story.data
score = story['score'] score = story['score']
num_comments = story['num_comments'] num_comments = story['num_comments']
@@ -250,77 +134,69 @@ def static_story(sid):
score, 's' if score != 1 else '', score, 's' if score != 1 else '',
num_comments, 's' if num_comments != 1 else '', num_comments, 's' if num_comments != 1 else '',
source) source)
url = urlparse(story.get('url') or story.get('link') or '').hostname or '' url = urlparse(story['url']).hostname or urlparse(story['link']).hostname or ''
url = url.replace('www.', '') url = url.replace('www.', '')
return render_template('index.html', return render_template('index.html',
title=story['title'] + ' | QotNews', title=story['title'],
url=url, url=url,
description=description, description=description)
robots='noindex',
story=story,
show_comments=request.path.endswith('/c'),
)
http_server = WSGIServer(('', 33842), flask_app) http_server = WSGIServer(('', settings.API_PORT or 33842), flask_app)
def _add_new_refs():
for ref, source, urlref in feed.get_list():
if database.get_story_by_ref(ref):
continue
try:
nid = new_id()
database.put_ref(ref, nid, source, urlref)
logging.info('Added ref ' + ref)
except database.IntegrityError:
continue
def _update_current_story(item):
try:
story = database.get_story(item['sid']).data
except AttributeError:
story = dict(id=item['sid'], ref=item['ref'], source=item['source'])
logging.info('Updating story: {}'.format(str(story['ref'])))
valid = feed.update_story(story, urlref=item['urlref'])
if valid:
database.put_story(story)
search.put_story(story)
else:
database.del_ref(item['ref'])
logging.info('Removed ref {}'.format(item['ref']))
def feed_thread(): def feed_thread():
global news_index, ref_list, current_item ref_list = []
try: try:
while True: while True:
# onboard new stories # onboard new stories
if news_index == 0: if not len(ref_list):
for ref, source in feed.list(): _add_new_refs()
if database.get_story_by_ref(ref): ref_list = database.get_reflist()
continue
try:
nid = new_id()
logging.info('Adding ref: {}, id: {}, source: {}'.format(ref, nid, source))
database.put_ref(ref, nid, source)
except database.IntegrityError:
logging.info('Already have ID / ref, skipping.')
continue
ref_list = database.get_reflist(settings.FEED_LENGTH)
# update current stories # update current stories
if news_index < len(ref_list): if len(ref_list):
current_item = ref_list[news_index] item = ref_list.pop(0)
_update_current_story(item)
try:
story_json = database.get_story(current_item['sid']).full_json
story = json.loads(story_json)
except AttributeError:
story = dict(id=current_item['sid'], ref=current_item['ref'], source=current_item['source'])
logging.info('Updating {} story: {}, index: {}'.format(story['source'], story['ref'], news_index))
valid = feed.update_story(story)
if valid:
database.put_story(story)
search.put_story(story)
else:
database.del_ref(current_item['ref'])
logging.info('Removed ref {}'.format(current_item['ref']))
else:
logging.info('Skipping index: ' + str(news_index))
gevent.sleep(6) gevent.sleep(6)
news_index += 1
if news_index == settings.FEED_LENGTH: news_index = 0
except KeyboardInterrupt: except KeyboardInterrupt:
logging.info('Ending feed thread...') logging.info('Ending feed thread...')
except ValueError as e: except ValueError as e:
logging.critical('feed_thread error: {} {}'.format(e.__class__.__name__, e)) logging.error('feed_thread error: {} {}'.format(e.__class__.__name__, e))
http_server.stop() http_server.stop()
logging.info('Starting Feed thread...') print('Starting Feed thread...')
gevent.spawn(feed_thread) gevent.spawn(feed_thread)
logging.info('Starting HTTP thread...') print('Starting HTTP thread...')
try: try:
http_server.serve_forever() http_server.serve_forever()
except KeyboardInterrupt: except KeyboardInterrupt:

View File

@@ -1,23 +1,59 @@
# QotNews settings
# edit this file and save it as settings.py
+HOSTNAME = 'news.t0.vc'
+MAX_STORY_AGE = 3*24*60*60
+SCRAPERS = ['headless', 'outline', 'declutter', 'simple']
+API_PORT = 33842
+READER_PORT = 3000
# Feed Lengths
# Number of top items from each site to pull
# set to 0 to disable that site
-FEED_LENGTH = 75
NUM_HACKERNEWS = 15
-NUM_LOBSTERS = 10
-NUM_REDDIT = 15
+NUM_REDDIT = 10
NUM_TILDES = 5
-NUM_SUBSTACK = 10
-# Meilisearch server URL
-# Leave blank if not using search
-#MEILI_URL = 'http://127.0.0.1:7700/'
-MEILI_URL = ''
-# Readerserver URL
-# Leave blank if not using, but that defeats the whole point
-READER_URL = 'http://127.0.0.1:33843/'
+SITEMAP = {}
+# SITEMAP['nzherald'] = {
+# 'url': "https://www.nzherald.co.nz/arcio/news-sitemap/",
+# 'count': 20,
+# 'patterns': [
+# r'^https:\/\/www\.(nzherald\.co\.nz)\/.*\/([^/]+)\/?$',
+# ],
+# 'excludes': [
+# 'driven.co.nz',
+# 'oneroof.co.nz',
+# 'nzherald.co.nz/sponsored-stories',
+# 'nzherald.co.nz/entertainment/',
+# 'nzherald.co.nz/lifestyle/',
+# 'nzherald.co.nz/travel/',
+# 'nzherald.co.nz/sport/',
+# 'nzherald.co.nz/promotions/',
+# 'nzherald.co.nzhttp',
+# 'herald-afternoon-quiz',
+# 'herald-morning-quiz'
+# ],
+# }
+SUBSTACK = {}
+# SUBSTACK['webworm'] = { 'url': "https://www.webworm.co", 'count': 10},
+# SUBSTACK['the bulletin'] = { 'url': "https://thespinoff.substack.com", 'count': 10},
+CATEGORY = {}
+# CATEGORY['radionz'] = {
+# 'url': "https://www.rnz.co.nz/news/",
+# 'count': 20,
+# 'patterns': [
+# r'https:\/\/www\.(rnz\.co\.nz)\/news\/[^\/]+\/(\d+)\/[^\/]+\/?'
+# ],
+# 'excludes': [
+# 'rnz.co.nz/news/sport',
+# 'rnz.co.nz/weather',
+# ],
+# }
# Reddit account info
# leave blank if not using Reddit
@@ -25,6 +61,10 @@ REDDIT_CLIENT_ID = ''
REDDIT_CLIENT_SECRET = ''
REDDIT_USER_AGENT = ''
+# Minimum points or number of comments before including a thread:
+REDDIT_COMMENT_THRESHOLD = 10
+REDDIT_SCORE_THRESHOLD = 25
SUBREDDITS = [
'Economics',
'AcademicPhilosophy',
@@ -33,9 +73,13 @@ SUBREDDITS = [
'HistoryofIdeas',
'LaymanJournals',
'PhilosophyofScience',
+'PoliticsPDFs',
+'Scholar',
'StateOfTheUnion',
'TheAgora',
+'TrueFilm',
'TrueReddit',
+'UniversityofReddit',
'culturalstudies',
'hardscience',
'indepthsports',
@@ -44,7 +88,4 @@ SUBREDDITS = [
'neurophilosophy',
'resilientcommunities',
'worldevents',
-'StallmanWasRight',
-'EverythingScience',
-'longevity',
]
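The commented-out SITEMAP and CATEGORY entries above show the expected shape of each source: a listing 'url', a 'count', regex 'patterns' whose capture groups yield a unique ref per article, and substring 'excludes'. A rough sketch of how such an entry might be applied to a candidate URL; the match_ref helper and its return shape are illustrative, not code from this change:
import re
def match_ref(entry, url):
    # Skip anything matching an exclude substring, then take the first pattern hit.
    if any(exclude in url for exclude in entry.get('excludes', [])):
        return None
    for pattern in entry.get('patterns', []):
        m = re.match(pattern, url)
        if m:
            return '/'.join(m.groups())
    return None
entry = {
    'url': "https://www.rnz.co.nz/news/",
    'count': 20,
    'patterns': [r'https:\/\/www\.(rnz\.co\.nz)\/news\/[^\/]+\/(\d+)\/[^\/]+\/?'],
    'excludes': ['rnz.co.nz/news/sport', 'rnz.co.nz/weather'],
}
# Prints 'rnz.co.nz/123456' for a URL shaped like the RNZ example above.
print(match_ref(entry, 'https://www.rnz.co.nz/news/national/123456/example-headline'))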

View File

@@ -8,17 +8,8 @@ import string
from bleach.sanitizer import Cleaner
-def alert_tanner(message):
-try:
-logging.info('Alerting Tanner: ' + message)
-params = dict(qotnews=message)
-requests.get('https://tbot.tannercollin.com/message', params=params, timeout=4)
-except BaseException as e:
-logging.error('Problem alerting Tanner: ' + str(e))
-NUM_ID_CHARS = 4
def gen_rand_id():
-return ''.join(random.choice(string.ascii_uppercase) for _ in range(NUM_ID_CHARS))
+return ''.join(random.choice(string.ascii_uppercase) for _ in range(4))
def render_md(md):
if md:
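One side note on the gen_rand_id() change: both versions draw story IDs from the same four-letter uppercase space, so dropping the NUM_ID_CHARS constant does not change the number of possible IDs. A quick illustration, numbers only, not code from this change:
import random, string
def gen_rand_id():
    return ''.join(random.choice(string.ascii_uppercase) for _ in range(4))
# 26**4 = 456976 possible IDs; server.py's new_id() retries on collision with an
# existing story, so duplicates only cost extra lookups.
print(26 ** 4, gen_rand_id())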

readerserver Submodule

Submodule readerserver added at 50a94df728

View File

@@ -1,92 +0,0 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# TypeScript v1 declaration files
typings/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variables file
.env
.env.test
# parcel-bundler cache (https://parceljs.org/)
.cache
# next.js build output
.next
# nuxt.js build output
.nuxt
# vuepress build output
.vuepress/dist
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# Editor
*.swp
*.swo

View File

@@ -1,54 +0,0 @@
const express = require('express');
const app = express();
const port = 33843;
const request = require('request');
const JSDOM = require('jsdom').JSDOM;
const { Readability } = require('readability');
app.use(express.urlencoded({ extended: true }));
app.get('/', (req, res) => {
res.send('<form method="POST" accept-charset="UTF-8"><input name="url"><button type="submit">SUBMIT</button></form>');
});
const requestCallback = (url, res) => (error, response, body) => {
if (!error && response.statusCode == 200) {
console.log('Response OK.');
const doc = new JSDOM(body, {url: url});
const reader = new Readability(doc.window.document);
const article = reader.parse();
if (article && article.content) {
res.send(article.content);
} else {
res.sendStatus(404);
}
} else {
console.log('Response error:', error ? error.toString() : response.statusCode);
res.sendStatus(response ? response.statusCode : 404);
}
};
app.post('/', (req, res) => {
const url = req.body.url;
const requestOptions = {
url: url,
gzip: true,
//headers: {'User-Agent': 'Googlebot/2.1 (+http://www.google.com/bot.html)'},
//headers: {'User-Agent': 'Twitterbot/1.0'},
headers: {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0',
'X-Forwarded-For': '66.249.66.1',
},
};
console.log('Parse request for:', url);
request(requestOptions, requestCallback(url, res));
});
app.listen(port, () => {
console.log(`Example app listening on port ${port}!`);
});

View File

@@ -1,13 +0,0 @@
{
"name": "readerserver",
"version": "1.0.0",
"main": "main.js",
"license": "MIT",
"dependencies": {
"dompurify": "^1.0.11",
"express": "^4.17.1",
"jsdom": "^15.1.1",
"readability": "https://github.com/mozilla/readability",
"request": "^2.88.0"
}
}

View File

@@ -1,994 +0,0 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1
abab@^2.0.0:
version "2.0.5"
resolved "https://registry.yarnpkg.com/abab/-/abab-2.0.5.tgz#c0b678fb32d60fc1219c784d6a826fe385aeb79a"
integrity sha512-9IK9EadsbHo6jLWIpxpR6pL0sazTXV6+SQv25ZB+F7Bj9mJNaOc4nCRabwd5M/JwmUa8idz6Eci6eKfJryPs6Q==
accepts@~1.3.8:
version "1.3.8"
resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.8.tgz#0bf0be125b67014adcb0b0921e62db7bffe16b2e"
integrity sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==
dependencies:
mime-types "~2.1.34"
negotiator "0.6.3"
acorn-globals@^4.3.2:
version "4.3.4"
resolved "https://registry.yarnpkg.com/acorn-globals/-/acorn-globals-4.3.4.tgz#9fa1926addc11c97308c4e66d7add0d40c3272e7"
integrity sha512-clfQEh21R+D0leSbUdWf3OcfqyaCSAQ8Ryq00bofSekfr9W8u1jyYZo6ir0xu9Gtcf7BjcHJpnbZH7JOCpP60A==
dependencies:
acorn "^6.0.1"
acorn-walk "^6.0.1"
acorn-walk@^6.0.1:
version "6.2.0"
resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-6.2.0.tgz#123cb8f3b84c2171f1f7fb252615b1c78a6b1a8c"
integrity sha512-7evsyfH1cLOCdAzZAd43Cic04yKydNx0cF+7tiA19p1XnLLPU4dpCQOqpjqwokFe//vS0QqfqqjCS2JkiIs0cA==
acorn@^6.0.1:
version "6.4.2"
resolved "https://registry.yarnpkg.com/acorn/-/acorn-6.4.2.tgz#35866fd710528e92de10cf06016498e47e39e1e6"
integrity sha512-XtGIhXwF8YM8bJhGxG5kXgjkEuNGLTkoYqVE+KMR+aspr4KGYmKYg7yUe3KghyQ9yheNwLnjmzh/7+gfDBmHCQ==
acorn@^7.1.0:
version "7.4.1"
resolved "https://registry.yarnpkg.com/acorn/-/acorn-7.4.1.tgz#feaed255973d2e77555b83dbc08851a6c63520fa"
integrity sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A==
ajv@^6.12.3:
version "6.12.6"
resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4"
integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==
dependencies:
fast-deep-equal "^3.1.1"
fast-json-stable-stringify "^2.0.0"
json-schema-traverse "^0.4.1"
uri-js "^4.2.2"
array-equal@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93"
integrity sha1-jCpe8kcv2ep0KwTHenUJO6J1fJM=
array-flatten@1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2"
integrity sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=
asn1@~0.2.3:
version "0.2.6"
resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.6.tgz#0d3a7bb6e64e02a90c0303b31f292868ea09a08d"
integrity sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==
dependencies:
safer-buffer "~2.1.0"
assert-plus@1.0.0, assert-plus@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525"
integrity sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==
asynckit@^0.4.0:
version "0.4.0"
resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==
aws-sign2@~0.7.0:
version "0.7.0"
resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8"
integrity sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==
aws4@^1.8.0:
version "1.11.0"
resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.11.0.tgz#d61f46d83b2519250e2784daf5b09479a8b41c59"
integrity sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==
bcrypt-pbkdf@^1.0.0:
version "1.0.2"
resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz#a4301d389b6a43f9b67ff3ca11a3f6637e360e9e"
integrity sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==
dependencies:
tweetnacl "^0.14.3"
body-parser@1.19.2:
version "1.19.2"
resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.19.2.tgz#4714ccd9c157d44797b8b5607d72c0b89952f26e"
integrity sha512-SAAwOxgoCKMGs9uUAUFHygfLAyaniaoun6I8mFY9pRAJL9+Kec34aU+oIjDhTycub1jozEfEwx1W1IuOYxVSFw==
dependencies:
bytes "3.1.2"
content-type "~1.0.4"
debug "2.6.9"
depd "~1.1.2"
http-errors "1.8.1"
iconv-lite "0.4.24"
on-finished "~2.3.0"
qs "6.9.7"
raw-body "2.4.3"
type-is "~1.6.18"
browser-process-hrtime@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/browser-process-hrtime/-/browser-process-hrtime-1.0.0.tgz#3c9b4b7d782c8121e56f10106d84c0d0ffc94626"
integrity sha512-9o5UecI3GhkpM6DrXr69PblIuWxPKk9Y0jHBRhdocZ2y7YECBFCsHm79Pr3OyR2AvjhDkabFJaDJMYRazHgsow==
bytes@3.1.2:
version "3.1.2"
resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.2.tgz#8b0beeb98605adf1b128fa4386403c009e0221a5"
integrity sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==
caseless@~0.12.0:
version "0.12.0"
resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc"
integrity sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==
combined-stream@^1.0.6, combined-stream@~1.0.6:
version "1.0.8"
resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f"
integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==
dependencies:
delayed-stream "~1.0.0"
content-disposition@0.5.4:
version "0.5.4"
resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.4.tgz#8b82b4efac82512a02bb0b1dcec9d2c5e8eb5bfe"
integrity sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==
dependencies:
safe-buffer "5.2.1"
content-type@~1.0.4:
version "1.0.4"
resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.4.tgz#e138cc75e040c727b1966fe5e5f8c9aee256fe3b"
integrity sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==
cookie-signature@1.0.6:
version "1.0.6"
resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c"
integrity sha1-4wOogrNCzD7oylE6eZmXNNqzriw=
cookie@0.4.2:
version "0.4.2"
resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.4.2.tgz#0e41f24de5ecf317947c82fc789e06a884824432"
integrity sha512-aSWTXFzaKWkvHO1Ny/s+ePFpvKsPnjc551iI41v3ny/ow6tBG5Vd+FuqGNhh1LxOmVzOlGUriIlOaokOvhaStA==
core-util-is@1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
integrity sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==
cssom@^0.4.1:
version "0.4.4"
resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.4.4.tgz#5a66cf93d2d0b661d80bf6a44fb65f5c2e4e0a10"
integrity sha512-p3pvU7r1MyyqbTk+WbNJIgJjG2VmTIaB10rI93LzVPrmDJKkzKYMtxxyAvQXR/NS6otuzveI7+7BBq3SjBS2mw==
cssom@~0.3.6:
version "0.3.8"
resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.3.8.tgz#9f1276f5b2b463f2114d3f2c75250af8c1a36f4a"
integrity sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg==
cssstyle@^2.0.0:
version "2.3.0"
resolved "https://registry.yarnpkg.com/cssstyle/-/cssstyle-2.3.0.tgz#ff665a0ddbdc31864b09647f34163443d90b0852"
integrity sha512-AZL67abkUzIuvcHqk7c09cezpGNcxUxU4Ioi/05xHk4DQeTkWmGYftIE6ctU6AEt+Gn4n1lDStOtj7FKycP71A==
dependencies:
cssom "~0.3.6"
dashdash@^1.12.0:
version "1.14.1"
resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0"
integrity sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==
dependencies:
assert-plus "^1.0.0"
data-urls@^1.1.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/data-urls/-/data-urls-1.1.0.tgz#15ee0582baa5e22bb59c77140da8f9c76963bbfe"
integrity sha512-YTWYI9se1P55u58gL5GkQHW4P6VJBJ5iBT+B5a7i2Tjadhv52paJG0qHX4A0OR6/t52odI64KP2YvFpkDOi3eQ==
dependencies:
abab "^2.0.0"
whatwg-mimetype "^2.2.0"
whatwg-url "^7.0.0"
debug@2.6.9:
version "2.6.9"
resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f"
integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==
dependencies:
ms "2.0.0"
deep-is@~0.1.3:
version "0.1.4"
resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.4.tgz#a6f2dce612fadd2ef1f519b73551f17e85199831"
integrity sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==
delayed-stream@~1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
integrity sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==
depd@~1.1.2:
version "1.1.2"
resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.2.tgz#9bcd52e14c097763e749b274c4346ed2e560b5a9"
integrity sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=
destroy@~1.0.4:
version "1.0.4"
resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80"
integrity sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=
domexception@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/domexception/-/domexception-1.0.1.tgz#937442644ca6a31261ef36e3ec677fe805582c90"
integrity sha512-raigMkn7CJNNo6Ihro1fzG7wr3fHuYVytzquZKX5n0yizGsTcYgzdIUwj1X9pK0VvjeihV+XiclP+DjwbsSKug==
dependencies:
webidl-conversions "^4.0.2"
dompurify@^1.0.11:
version "1.0.11"
resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-1.0.11.tgz#fe0f4a40d147f7cebbe31a50a1357539cfc1eb4d"
integrity sha512-XywCTXZtc/qCX3iprD1pIklRVk/uhl8BKpkTxr+ZyMVUzSUg7wkQXRBp/euJ5J5moa1QvfpvaPQVP71z1O59dQ==
ecc-jsbn@~0.1.1:
version "0.1.2"
resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9"
integrity sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==
dependencies:
jsbn "~0.1.0"
safer-buffer "^2.1.0"
ee-first@1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
integrity sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=
encodeurl@~1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59"
integrity sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=
escape-html@~1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988"
integrity sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=
escodegen@^1.11.1:
version "1.14.3"
resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-1.14.3.tgz#4e7b81fba61581dc97582ed78cab7f0e8d63f503"
integrity sha512-qFcX0XJkdg+PB3xjZZG/wKSuT1PnQWx57+TVSjIMmILd2yC/6ByYElPwJnslDsuWuSAp4AwJGumarAAmJch5Kw==
dependencies:
esprima "^4.0.1"
estraverse "^4.2.0"
esutils "^2.0.2"
optionator "^0.8.1"
optionalDependencies:
source-map "~0.6.1"
esprima@^4.0.1:
version "4.0.1"
resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71"
integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==
estraverse@^4.2.0:
version "4.3.0"
resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.3.0.tgz#398ad3f3c5a24948be7725e83d11a7de28cdbd1d"
integrity sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==
esutils@^2.0.2:
version "2.0.3"
resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64"
integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==
etag@~1.8.1:
version "1.8.1"
resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887"
integrity sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=
express@^4.17.1:
version "4.17.3"
resolved "https://registry.yarnpkg.com/express/-/express-4.17.3.tgz#f6c7302194a4fb54271b73a1fe7a06478c8f85a1"
integrity sha512-yuSQpz5I+Ch7gFrPCk4/c+dIBKlQUxtgwqzph132bsT6qhuzss6I8cLJQz7B3rFblzd6wtcI0ZbGltH/C4LjUg==
dependencies:
accepts "~1.3.8"
array-flatten "1.1.1"
body-parser "1.19.2"
content-disposition "0.5.4"
content-type "~1.0.4"
cookie "0.4.2"
cookie-signature "1.0.6"
debug "2.6.9"
depd "~1.1.2"
encodeurl "~1.0.2"
escape-html "~1.0.3"
etag "~1.8.1"
finalhandler "~1.1.2"
fresh "0.5.2"
merge-descriptors "1.0.1"
methods "~1.1.2"
on-finished "~2.3.0"
parseurl "~1.3.3"
path-to-regexp "0.1.7"
proxy-addr "~2.0.7"
qs "6.9.7"
range-parser "~1.2.1"
safe-buffer "5.2.1"
send "0.17.2"
serve-static "1.14.2"
setprototypeof "1.2.0"
statuses "~1.5.0"
type-is "~1.6.18"
utils-merge "1.0.1"
vary "~1.1.2"
extend@~3.0.2:
version "3.0.2"
resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa"
integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==
extsprintf@1.3.0:
version "1.3.0"
resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05"
integrity sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==
extsprintf@^1.2.0:
version "1.4.1"
resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.1.tgz#8d172c064867f235c0c84a596806d279bf4bcc07"
integrity sha512-Wrk35e8ydCKDj/ArClo1VrPVmN8zph5V4AtHwIuHhvMXsKf73UT3BOD+azBIW+3wOJ4FhEH7zyaJCFvChjYvMA==
fast-deep-equal@^3.1.1:
version "3.1.3"
resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525"
integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==
fast-json-stable-stringify@^2.0.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633"
integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==
fast-levenshtein@~2.0.6:
version "2.0.6"
resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917"
integrity sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=
finalhandler@~1.1.2:
version "1.1.2"
resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.1.2.tgz#b7e7d000ffd11938d0fdb053506f6ebabe9f587d"
integrity sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==
dependencies:
debug "2.6.9"
encodeurl "~1.0.2"
escape-html "~1.0.3"
on-finished "~2.3.0"
parseurl "~1.3.3"
statuses "~1.5.0"
unpipe "~1.0.0"
forever-agent@~0.6.1:
version "0.6.1"
resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
integrity sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw==
form-data@~2.3.2:
version "2.3.3"
resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.3.3.tgz#dcce52c05f644f298c6a7ab936bd724ceffbf3a6"
integrity sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==
dependencies:
asynckit "^0.4.0"
combined-stream "^1.0.6"
mime-types "^2.1.12"
forwarded@0.2.0:
version "0.2.0"
resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.2.0.tgz#2269936428aad4c15c7ebe9779a84bf0b2a81811"
integrity sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==
fresh@0.5.2:
version "0.5.2"
resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7"
integrity sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=
getpass@^0.1.1:
version "0.1.7"
resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa"
integrity sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==
dependencies:
assert-plus "^1.0.0"
har-schema@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92"
integrity sha512-Oqluz6zhGX8cyRaTQlFMPw80bSJVG2x/cFb8ZPhUILGgHka9SsokCCOQgpveePerqidZOrT14ipqfJb7ILcW5Q==
har-validator@~5.1.3:
version "5.1.5"
resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.1.5.tgz#1f0803b9f8cb20c0fa13822df1ecddb36bde1efd"
integrity sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==
dependencies:
ajv "^6.12.3"
har-schema "^2.0.0"
html-encoding-sniffer@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-1.0.2.tgz#e70d84b94da53aa375e11fe3a351be6642ca46f8"
integrity sha512-71lZziiDnsuabfdYiUeWdCVyKuqwWi23L8YeIgV9jSSZHCtb6wB1BKWooH7L3tn4/FuZJMVWyNaIDr4RGmaSYw==
dependencies:
whatwg-encoding "^1.0.1"
http-errors@1.8.1:
version "1.8.1"
resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.8.1.tgz#7c3f28577cbc8a207388455dbd62295ed07bd68c"
integrity sha512-Kpk9Sm7NmI+RHhnj6OIWDI1d6fIoFAtFt9RLaTMRlg/8w49juAStsrBgp0Dp4OdxdVbRIeKhtCUvoi/RuAhO4g==
dependencies:
depd "~1.1.2"
inherits "2.0.4"
setprototypeof "1.2.0"
statuses ">= 1.5.0 < 2"
toidentifier "1.0.1"
http-signature@~1.2.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.2.0.tgz#9aecd925114772f3d95b65a60abb8f7c18fbace1"
integrity sha512-CAbnr6Rz4CYQkLYUtSNXxQPUH2gK8f3iWexVlsnMeD+GjlsQ0Xsy1cOX+mN3dtxYomRy21CiOzU8Uhw6OwncEQ==
dependencies:
assert-plus "^1.0.0"
jsprim "^1.2.2"
sshpk "^1.7.0"
iconv-lite@0.4.24:
version "0.4.24"
resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b"
integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==
dependencies:
safer-buffer ">= 2.1.2 < 3"
inherits@2.0.4:
version "2.0.4"
resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c"
integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==
ip-regex@^2.1.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-2.1.0.tgz#fa78bf5d2e6913c911ce9f819ee5146bb6d844e9"
integrity sha1-+ni/XS5pE8kRzp+BnuUUa7bYROk=
ipaddr.js@1.9.1:
version "1.9.1"
resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz#bff38543eeb8984825079ff3a2a8e6cbd46781b3"
integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==
is-typedarray@~1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
integrity sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==
isstream@~0.1.2:
version "0.1.2"
resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a"
integrity sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g==
jsbn@~0.1.0:
version "0.1.1"
resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513"
integrity sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==
jsdom@^15.1.1:
version "15.2.1"
resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-15.2.1.tgz#d2feb1aef7183f86be521b8c6833ff5296d07ec5"
integrity sha512-fAl1W0/7T2G5vURSyxBzrJ1LSdQn6Tr5UX/xD4PXDx/PDgwygedfW6El/KIj3xJ7FU61TTYnc/l/B7P49Eqt6g==
dependencies:
abab "^2.0.0"
acorn "^7.1.0"
acorn-globals "^4.3.2"
array-equal "^1.0.0"
cssom "^0.4.1"
cssstyle "^2.0.0"
data-urls "^1.1.0"
domexception "^1.0.1"
escodegen "^1.11.1"
html-encoding-sniffer "^1.0.2"
nwsapi "^2.2.0"
parse5 "5.1.0"
pn "^1.1.0"
request "^2.88.0"
request-promise-native "^1.0.7"
saxes "^3.1.9"
symbol-tree "^3.2.2"
tough-cookie "^3.0.1"
w3c-hr-time "^1.0.1"
w3c-xmlserializer "^1.1.2"
webidl-conversions "^4.0.2"
whatwg-encoding "^1.0.5"
whatwg-mimetype "^2.3.0"
whatwg-url "^7.0.0"
ws "^7.0.0"
xml-name-validator "^3.0.0"
json-schema-traverse@^0.4.1:
version "0.4.1"
resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660"
integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==
json-schema@0.4.0:
version "0.4.0"
resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.4.0.tgz#f7de4cf6efab838ebaeb3236474cbba5a1930ab5"
integrity sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==
json-stringify-safe@~5.0.1:
version "5.0.1"
resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb"
integrity sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==
jsprim@^1.2.2:
version "1.4.2"
resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.2.tgz#712c65533a15c878ba59e9ed5f0e26d5b77c5feb"
integrity sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw==
dependencies:
assert-plus "1.0.0"
extsprintf "1.3.0"
json-schema "0.4.0"
verror "1.10.0"
levn@~0.3.0:
version "0.3.0"
resolved "https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee"
integrity sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=
dependencies:
prelude-ls "~1.1.2"
type-check "~0.3.2"
lodash.sortby@^4.7.0:
version "4.7.0"
resolved "https://registry.yarnpkg.com/lodash.sortby/-/lodash.sortby-4.7.0.tgz#edd14c824e2cc9c1e0b0a1b42bb5210516a42438"
integrity sha1-7dFMgk4sycHgsKG0K7UhBRakJDg=
lodash@^4.17.19:
version "4.17.21"
resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c"
integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==
media-typer@0.3.0:
version "0.3.0"
resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748"
integrity sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=
merge-descriptors@1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61"
integrity sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=
methods@~1.1.2:
version "1.1.2"
resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee"
integrity sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=
mime-db@1.51.0:
version "1.51.0"
resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.51.0.tgz#d9ff62451859b18342d960850dc3cfb77e63fb0c"
integrity sha512-5y8A56jg7XVQx2mbv1lu49NR4dokRnhZYTtL+KGfaa27uq4pSTXkwQkFJl4pkRMyNFz/EtYDSkiiEHx3F7UN6g==
mime-db@1.52.0:
version "1.52.0"
resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70"
integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
mime-types@^2.1.12, mime-types@~2.1.19:
version "2.1.35"
resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a"
integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
dependencies:
mime-db "1.52.0"
mime-types@~2.1.24, mime-types@~2.1.34:
version "2.1.34"
resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.34.tgz#5a712f9ec1503511a945803640fafe09d3793c24"
integrity sha512-6cP692WwGIs9XXdOO4++N+7qjqv0rqxxVvJ3VHPh/Sc9mVZcQP+ZGhkKiTvWMQRr2tbHkJP/Yn7Y0npb3ZBs4A==
dependencies:
mime-db "1.51.0"
mime@1.6.0:
version "1.6.0"
resolved "https://registry.yarnpkg.com/mime/-/mime-1.6.0.tgz#32cd9e5c64553bd58d19a568af452acff04981b1"
integrity sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==
ms@2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8"
integrity sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=
ms@2.1.3:
version "2.1.3"
resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2"
integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==
negotiator@0.6.3:
version "0.6.3"
resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.3.tgz#58e323a72fedc0d6f9cd4d31fe49f51479590ccd"
integrity sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==
nwsapi@^2.2.0:
version "2.2.0"
resolved "https://registry.yarnpkg.com/nwsapi/-/nwsapi-2.2.0.tgz#204879a9e3d068ff2a55139c2c772780681a38b7"
integrity sha512-h2AatdwYH+JHiZpv7pt/gSX1XoRGb7L/qSIeuqA6GwYoF9w1vP1cw42TO0aI2pNyshRK5893hNSl+1//vHK7hQ==
oauth-sign@~0.9.0:
version "0.9.0"
resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.9.0.tgz#47a7b016baa68b5fa0ecf3dee08a85c679ac6455"
integrity sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==
on-finished@~2.3.0:
version "2.3.0"
resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947"
integrity sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=
dependencies:
ee-first "1.1.1"
optionator@^0.8.1:
version "0.8.3"
resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.3.tgz#84fa1d036fe9d3c7e21d99884b601167ec8fb495"
integrity sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==
dependencies:
deep-is "~0.1.3"
fast-levenshtein "~2.0.6"
levn "~0.3.0"
prelude-ls "~1.1.2"
type-check "~0.3.2"
word-wrap "~1.2.3"
parse5@5.1.0:
version "5.1.0"
resolved "https://registry.yarnpkg.com/parse5/-/parse5-5.1.0.tgz#c59341c9723f414c452975564c7c00a68d58acd2"
integrity sha512-fxNG2sQjHvlVAYmzBZS9YlDp6PTSSDwa98vkD4QgVDDCAo84z5X1t5XyJQ62ImdLXx5NdIIfihey6xpum9/gRQ==
parseurl@~1.3.3:
version "1.3.3"
resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.3.tgz#9da19e7bee8d12dff0513ed5b76957793bc2e8d4"
integrity sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==
path-to-regexp@0.1.7:
version "0.1.7"
resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c"
integrity sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=
performance-now@^2.1.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b"
integrity sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==
pn@^1.1.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/pn/-/pn-1.1.0.tgz#e2f4cef0e219f463c179ab37463e4e1ecdccbafb"
integrity sha512-2qHaIQr2VLRFoxe2nASzsV6ef4yOOH+Fi9FBOVH6cqeSgUnoyySPZkxzLuzd+RYOQTRpROA0ztTMqxROKSb/nA==
prelude-ls@~1.1.2:
version "1.1.2"
resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54"
integrity sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=
proxy-addr@~2.0.7:
version "2.0.7"
resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-2.0.7.tgz#f19fe69ceab311eeb94b42e70e8c2070f9ba1025"
integrity sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==
dependencies:
forwarded "0.2.0"
ipaddr.js "1.9.1"
psl@^1.1.28:
version "1.9.0"
resolved "https://registry.yarnpkg.com/psl/-/psl-1.9.0.tgz#d0df2a137f00794565fcaf3b2c00cd09f8d5a5a7"
integrity sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==
punycode@^2.1.0, punycode@^2.1.1:
version "2.1.1"
resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec"
integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==
qs@6.9.7:
version "6.9.7"
resolved "https://registry.yarnpkg.com/qs/-/qs-6.9.7.tgz#4610846871485e1e048f44ae3b94033f0e675afe"
integrity sha512-IhMFgUmuNpyRfxA90umL7ByLlgRXu6tIfKPpF5TmcfRLlLCckfP/g3IQmju6jjpu+Hh8rA+2p6A27ZSPOOHdKw==
qs@~6.5.2:
version "6.5.3"
resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.3.tgz#3aeeffc91967ef6e35c0e488ef46fb296ab76aad"
integrity sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==
range-parser@~1.2.1:
version "1.2.1"
resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.1.tgz#3cf37023d199e1c24d1a55b84800c2f3e6468031"
integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==
raw-body@2.4.3:
version "2.4.3"
resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.4.3.tgz#8f80305d11c2a0a545c2d9d89d7a0286fcead43c"
integrity sha512-UlTNLIcu0uzb4D2f4WltY6cVjLi+/jEN4lgEUj3E04tpMDpUlkBo/eSn6zou9hum2VMNpCCUone0O0WeJim07g==
dependencies:
bytes "3.1.2"
http-errors "1.8.1"
iconv-lite "0.4.24"
unpipe "1.0.0"
"readability@https://github.com/mozilla/readability":
version "0.5.0"
resolved "https://github.com/mozilla/readability#39a5c5409fb653858b1832141895b882b9092b47"
request-promise-core@1.1.4:
version "1.1.4"
resolved "https://registry.yarnpkg.com/request-promise-core/-/request-promise-core-1.1.4.tgz#3eedd4223208d419867b78ce815167d10593a22f"
integrity sha512-TTbAfBBRdWD7aNNOoVOBH4pN/KigV6LyapYNNlAPA8JwbovRti1E88m3sYAwsLi5ryhPKsE9APwnjFTgdUjTpw==
dependencies:
lodash "^4.17.19"
request-promise-native@^1.0.7:
version "1.0.9"
resolved "https://registry.yarnpkg.com/request-promise-native/-/request-promise-native-1.0.9.tgz#e407120526a5efdc9a39b28a5679bf47b9d9dc28"
integrity sha512-wcW+sIUiWnKgNY0dqCpOZkUbF/I+YPi+f09JZIDa39Ec+q82CpSYniDp+ISgTTbKmnpJWASeJBPZmoxH84wt3g==
dependencies:
request-promise-core "1.1.4"
stealthy-require "^1.1.1"
tough-cookie "^2.3.3"
request@^2.88.0:
version "2.88.2"
resolved "https://registry.yarnpkg.com/request/-/request-2.88.2.tgz#d73c918731cb5a87da047e207234146f664d12b3"
integrity sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==
dependencies:
aws-sign2 "~0.7.0"
aws4 "^1.8.0"
caseless "~0.12.0"
combined-stream "~1.0.6"
extend "~3.0.2"
forever-agent "~0.6.1"
form-data "~2.3.2"
har-validator "~5.1.3"
http-signature "~1.2.0"
is-typedarray "~1.0.0"
isstream "~0.1.2"
json-stringify-safe "~5.0.1"
mime-types "~2.1.19"
oauth-sign "~0.9.0"
performance-now "^2.1.0"
qs "~6.5.2"
safe-buffer "^5.1.2"
tough-cookie "~2.5.0"
tunnel-agent "^0.6.0"
uuid "^3.3.2"
safe-buffer@5.2.1, safe-buffer@^5.0.1, safe-buffer@^5.1.2:
version "5.2.1"
resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6"
integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==
"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0:
version "2.1.2"
resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a"
integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==
saxes@^3.1.9:
version "3.1.11"
resolved "https://registry.yarnpkg.com/saxes/-/saxes-3.1.11.tgz#d59d1fd332ec92ad98a2e0b2ee644702384b1c5b"
integrity sha512-Ydydq3zC+WYDJK1+gRxRapLIED9PWeSuuS41wqyoRmzvhhh9nc+QQrVMKJYzJFULazeGhzSV0QleN2wD3boh2g==
dependencies:
xmlchars "^2.1.1"
send@0.17.2:
version "0.17.2"
resolved "https://registry.yarnpkg.com/send/-/send-0.17.2.tgz#926622f76601c41808012c8bf1688fe3906f7820"
integrity sha512-UJYB6wFSJE3G00nEivR5rgWp8c2xXvJ3OPWPhmuteU0IKj8nKbG3DrjiOmLwpnHGYWAVwA69zmTm++YG0Hmwww==
dependencies:
debug "2.6.9"
depd "~1.1.2"
destroy "~1.0.4"
encodeurl "~1.0.2"
escape-html "~1.0.3"
etag "~1.8.1"
fresh "0.5.2"
http-errors "1.8.1"
mime "1.6.0"
ms "2.1.3"
on-finished "~2.3.0"
range-parser "~1.2.1"
statuses "~1.5.0"
serve-static@1.14.2:
version "1.14.2"
resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.14.2.tgz#722d6294b1d62626d41b43a013ece4598d292bfa"
integrity sha512-+TMNA9AFxUEGuC0z2mevogSnn9MXKb4fa7ngeRMJaaGv8vTwnIEkKi+QGvPt33HSnf8pRS+WGM0EbMtCJLKMBQ==
dependencies:
encodeurl "~1.0.2"
escape-html "~1.0.3"
parseurl "~1.3.3"
send "0.17.2"
setprototypeof@1.2.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.2.0.tgz#66c9a24a73f9fc28cbe66b09fed3d33dcaf1b424"
integrity sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==
source-map@~0.6.1:
version "0.6.1"
resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263"
integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==
sshpk@^1.7.0:
version "1.17.0"
resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.17.0.tgz#578082d92d4fe612b13007496e543fa0fbcbe4c5"
integrity sha512-/9HIEs1ZXGhSPE8X6Ccm7Nam1z8KcoCqPdI7ecm1N33EzAetWahvQWVqLZtaZQ+IDKX4IyA2o0gBzqIMkAagHQ==
dependencies:
asn1 "~0.2.3"
assert-plus "^1.0.0"
bcrypt-pbkdf "^1.0.0"
dashdash "^1.12.0"
ecc-jsbn "~0.1.1"
getpass "^0.1.1"
jsbn "~0.1.0"
safer-buffer "^2.0.2"
tweetnacl "~0.14.0"
"statuses@>= 1.5.0 < 2", statuses@~1.5.0:
version "1.5.0"
resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.5.0.tgz#161c7dac177659fd9811f43771fa99381478628c"
integrity sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=
stealthy-require@^1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/stealthy-require/-/stealthy-require-1.1.1.tgz#35b09875b4ff49f26a777e509b3090a3226bf24b"
integrity sha1-NbCYdbT/SfJqd35QmzCQoyJr8ks=
symbol-tree@^3.2.2:
version "3.2.4"
resolved "https://registry.yarnpkg.com/symbol-tree/-/symbol-tree-3.2.4.tgz#430637d248ba77e078883951fb9aa0eed7c63fa2"
integrity sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==
toidentifier@1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.1.tgz#3be34321a88a820ed1bd80dfaa33e479fbb8dd35"
integrity sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==
tough-cookie@^2.3.3, tough-cookie@~2.5.0:
version "2.5.0"
resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.5.0.tgz#cd9fb2a0aa1d5a12b473bd9fb96fa3dcff65ade2"
integrity sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==
dependencies:
psl "^1.1.28"
punycode "^2.1.1"
tough-cookie@^3.0.1:
version "3.0.1"
resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-3.0.1.tgz#9df4f57e739c26930a018184887f4adb7dca73b2"
integrity sha512-yQyJ0u4pZsv9D4clxO69OEjLWYw+jbgspjTue4lTQZLfV0c5l1VmK2y1JK8E9ahdpltPOaAThPcp5nKPUgSnsg==
dependencies:
ip-regex "^2.1.0"
psl "^1.1.28"
punycode "^2.1.1"
tr46@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/tr46/-/tr46-1.0.1.tgz#a8b13fd6bfd2489519674ccde55ba3693b706d09"
integrity sha1-qLE/1r/SSJUZZ0zN5VujaTtwbQk=
dependencies:
punycode "^2.1.0"
tunnel-agent@^0.6.0:
version "0.6.0"
resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd"
integrity sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==
dependencies:
safe-buffer "^5.0.1"
tweetnacl@^0.14.3, tweetnacl@~0.14.0:
version "0.14.5"
resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64"
integrity sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==
type-check@~0.3.2:
version "0.3.2"
resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72"
integrity sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=
dependencies:
prelude-ls "~1.1.2"
type-is@~1.6.18:
version "1.6.18"
resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.18.tgz#4e552cd05df09467dcbc4ef739de89f2cf37c131"
integrity sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==
dependencies:
media-typer "0.3.0"
mime-types "~2.1.24"
unpipe@1.0.0, unpipe@~1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec"
integrity sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=
uri-js@^4.2.2:
version "4.4.1"
resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e"
integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==
dependencies:
punycode "^2.1.0"
utils-merge@1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713"
integrity sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=
uuid@^3.3.2:
version "3.4.0"
resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.4.0.tgz#b23e4358afa8a202fe7a100af1f5f883f02007ee"
integrity sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==
vary@~1.1.2:
version "1.1.2"
resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc"
integrity sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=
verror@1.10.0:
version "1.10.0"
resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400"
integrity sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==
dependencies:
assert-plus "^1.0.0"
core-util-is "1.0.2"
extsprintf "^1.2.0"
w3c-hr-time@^1.0.1:
version "1.0.2"
resolved "https://registry.yarnpkg.com/w3c-hr-time/-/w3c-hr-time-1.0.2.tgz#0a89cdf5cc15822df9c360543676963e0cc308cd"
integrity sha512-z8P5DvDNjKDoFIHK7q8r8lackT6l+jo/Ye3HOle7l9nICP9lf1Ci25fy9vHd0JOWewkIFzXIEig3TdKT7JQ5fQ==
dependencies:
browser-process-hrtime "^1.0.0"
w3c-xmlserializer@^1.1.2:
version "1.1.2"
resolved "https://registry.yarnpkg.com/w3c-xmlserializer/-/w3c-xmlserializer-1.1.2.tgz#30485ca7d70a6fd052420a3d12fd90e6339ce794"
integrity sha512-p10l/ayESzrBMYWRID6xbuCKh2Fp77+sA0doRuGn4tTIMrrZVeqfpKjXHY+oDh3K4nLdPgNwMTVP6Vp4pvqbNg==
dependencies:
domexception "^1.0.1"
webidl-conversions "^4.0.2"
xml-name-validator "^3.0.0"
webidl-conversions@^4.0.2:
version "4.0.2"
resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-4.0.2.tgz#a855980b1f0b6b359ba1d5d9fb39ae941faa63ad"
integrity sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==
whatwg-encoding@^1.0.1, whatwg-encoding@^1.0.5:
version "1.0.5"
resolved "https://registry.yarnpkg.com/whatwg-encoding/-/whatwg-encoding-1.0.5.tgz#5abacf777c32166a51d085d6b4f3e7d27113ddb0"
integrity sha512-b5lim54JOPN9HtzvK9HFXvBma/rnfFeqsic0hSpjtDbVxR3dJKLc+KB4V6GgiGOvl7CY/KNh8rxSo9DKQrnUEw==
dependencies:
iconv-lite "0.4.24"
whatwg-mimetype@^2.2.0, whatwg-mimetype@^2.3.0:
version "2.3.0"
resolved "https://registry.yarnpkg.com/whatwg-mimetype/-/whatwg-mimetype-2.3.0.tgz#3d4b1e0312d2079879f826aff18dbeeca5960fbf"
integrity sha512-M4yMwr6mAnQz76TbJm914+gPpB/nCwvZbJU28cUD6dR004SAxDLOOSUaB1JDRqLtaOV/vi0IC5lEAGFgrjGv/g==
whatwg-url@^7.0.0:
version "7.1.0"
resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-7.1.0.tgz#c2c492f1eca612988efd3d2266be1b9fc6170d06"
integrity sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg==
dependencies:
lodash.sortby "^4.7.0"
tr46 "^1.0.1"
webidl-conversions "^4.0.2"
word-wrap@~1.2.3:
version "1.2.3"
resolved "https://registry.yarnpkg.com/word-wrap/-/word-wrap-1.2.3.tgz#610636f6b1f703891bd34771ccb17fb93b47079c"
integrity sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==
ws@^7.0.0:
version "7.5.7"
resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.7.tgz#9e0ac77ee50af70d58326ecff7e85eb3fa375e67"
integrity sha512-KMvVuFzpKBuiIXW3E4u3mySRO2/mCHSyZDJQM5NQ9Q9KHWHWh0NHgfbRMLLrceUK5qAL4ytALJbpRMjixFZh8A==
xml-name-validator@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-3.0.0.tgz#6ae73e06de4d8c6e47f9fb181f78d648ad457c6a"
integrity sha512-A5CUptxDsvxKJEU3yO6DuWBSJz/qizqzJKOMIfUJHETbBw/sFaDxgd6fxm1ewUaM0jZ444Fc5vC5ROYurg/4Pw==
xmlchars@^2.1.1:
version "2.2.0"
resolved "https://registry.yarnpkg.com/xmlchars/-/xmlchars-2.2.0.tgz#060fe1bcb7f9c76fe2a17db86a9bc3ab894210cb"
integrity sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==

View File

@@ -3,7 +3,7 @@
Download MeiliSearch with: Download MeiliSearch with:
``` ```
wget https://github.com/meilisearch/meilisearch/releases/download/v0.27.0/meilisearch-linux-amd64 wget https://github.com/meilisearch/MeiliSearch/releases/download/v0.11.1/meilisearch-linux-amd64
chmod +x meilisearch-linux-amd64 chmod +x meilisearch-linux-amd64
``` ```

View File

Image file — Size: 538 B (before and after); preview not shown.

View File

Image file — Size: 6.5 KiB (before and after); preview not shown.

View File

Image file — Size: 5.4 KiB (before and after); preview not shown.

View File

Image file — Size: 500 B (before and after); preview not shown.

View File

@@ -4,14 +4,12 @@
"private": true, "private": true,
"dependencies": { "dependencies": {
"abort-controller": "^3.0.0", "abort-controller": "^3.0.0",
"katex": "^0.16.25",
"localforage": "^1.7.3", "localforage": "^1.7.3",
"moment": "^2.24.0", "moment": "^2.24.0",
"query-string": "^6.8.3", "query-string": "^6.8.3",
"react": "^16.9.0", "react": "^16.9.0",
"react-dom": "^16.9.0", "react-dom": "^16.9.0",
"react-helmet": "^5.2.1", "react-helmet": "^5.2.1",
"react-latex-next": "^3.0.0",
"react-router-dom": "^5.0.1", "react-router-dom": "^5.0.1",
"react-router-hash-link": "^1.2.2", "react-router-hash-link": "^1.2.2",
"react-scripts": "3.1.1" "react-scripts": "3.1.1"

View File

@@ -8,8 +8,6 @@
content="{{ description }}" content="{{ description }}"
/> />
<meta content="{{ url }}" name="og:site_name"> <meta content="{{ url }}" name="og:site_name">
<meta name="robots" content="{{ robots }}">
<link rel="apple-touch-icon" sizes="180x180" href="/apple-touch-icon.png"> <link rel="apple-touch-icon" sizes="180x180" href="/apple-touch-icon.png">
<link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png">
@@ -28,112 +26,26 @@
work correctly both with client-side routing and a non-root public URL. work correctly both with client-side routing and a non-root public URL.
Learn how to configure a non-root public URL by running `npm run build`. Learn how to configure a non-root public URL by running `npm run build`.
--> -->
<title>{{ title }}</title> <title>{{ title }} - QotNews</title>
<style> <style>
html { html {
overflow-y: scroll; overflow-y: scroll;
} }
body { body {
background: #eeeeee; background: #000;
}
.nojs {
color: white;
} }
</style> </style>
</head> </head>
<body> <body>
<div id="root"> <div class="nojs">
<div class="container menu"> <noscript>You need to enable JavaScript to run this app.</noscript>
<p>
<a href="/">QotNews</a>
<br />
<span class="slogan">Hacker News, Reddit, Lobsters, and Tildes articles rendered in reader mode.</span>
</p>
</div>
{% if story %}
<div class="{% if show_comments %}container{% else %}article-container{% endif %}">
<div class="article">
<h1>{{ story.title }}</h1>
{% if show_comments %}
<div class="info">
<a href="/{{ story.id }}">View article</a>
</div>
{% else %}
<div class="info">
Source: <a class="source" href="{{ story.url or story.link }}">{{ url }}</a>
</div>
{% endif %}
<div class="info">
{{ story.score }} points
by <a href="{{ story.author_link }}">{{ story.author }}</a>
{{ story.date | fromnow }}
on <a href="{{ story.link }}">{{ story.source }}</a> |
<a href="/{{ story.id }}/c">
{{ story.num_comments }} comment{{ 's' if story.num_comments != 1 }}
</a>
</div>
{% if not show_comments and story.text %}
<div class="story-text">{{ story.text | safe }}</div>
{% elif show_comments %}
{% macro render_comment(comment, level) %}
<dt></dt>
<dd class="comment{% if level > 0 %} lined{% endif %}">
<div class="info">
<p>
{% if comment.author == story.author %}[OP] {% endif %}{{ comment.author or '[Deleted]' }} | <a href="#{{ comment.author }}{{ comment.date }}" id="{{ comment.author }}{{ comment.date }}">{{ comment.date | fromnow }}</a>
</p>
</div>
<div class="text">{{ (comment.text | safe) if comment.text else '<p>[Empty / deleted comment]</p>' }}</div>
{% if comment.comments %}
<dl>
{% for reply in comment.comments %}
{{ render_comment(reply, level + 1) }}
{% endfor %}
</dl>
{% endif %}
</dd>
{% endmacro %}
<dl class="comments">
{% for comment in story.comments %}{{ render_comment(comment, 0) }}{% endfor %}
</dl>
{% endif %}
</div>
<div class='dot toggleDot'>
<div class='button'>
<a href="/{{ story.id }}{{ '/c' if not show_comments else '' }}">
{{ '' if not show_comments else '' }}
</a>
</div>
</div>
</div>
{% elif stories %}
<div class="container">
{% for story in stories %}
<div class='item'>
<div class='title'>
<a class='link' href='/{{ story.id }}'>
<img class='source-logo' src='/logos/{{ story.source }}.png' alt='{{ story.source }}:' /> {{ story.title }}
</a>
<span class='source'>
(<a class='source' href='{{ story.url or story.link }}'>{{ story.hostname }}</a>)
</span>
</div>
<div class='info'>
{{ story.score }} points
by <a href="{{ story.author_link }}">{{ story.author }}</a>
{{ story.date | fromnow }}
on <a href="{{ story.link }}">{{ story.source }}</a> |
<a class="{{ 'hot' if story.num_comments > 99 else '' }}" href="/{{ story.id }}/c">
{{ story.num_comments }} comment{{ 's' if story.num_comments != 1 }}
</a>
</div>
</div>
{% endfor %}
</div>
{% endif %}
</div> </div>
<div id="root"></div>
<!-- <!--
This HTML file is a template. This HTML file is a template.
If you open it directly in the browser, you will see an empty page. If you open it directly in the browser, you will see an empty page.

Binary image file — Size: 981 B (before); preview not shown.

View File

@@ -1,129 +1,85 @@
import React, { useState, useEffect, useRef, useCallback } from 'react'; import React from 'react';
import { BrowserRouter as Router, Route, Link, Switch } from 'react-router-dom'; import { BrowserRouter as Router, Route, Link, Switch } from 'react-router-dom';
import localForage from 'localforage'; import localForage from 'localforage';
import './Style-light.css'; import './Style-light.css';
import './Style-dark.css'; import './Style-dark.css';
import './Style-black.css';
import './Style-red.css';
import './fonts/Fonts.css'; import './fonts/Fonts.css';
import { BackwardDot, ForwardDot } from './utils.js'; import { ForwardDot } from './utils.js';
import Feed from './Feed.js';
import Article from './Article.js';
import Comments from './Comments.js';
import Search from './Search.js'; import Search from './Search.js';
import Submit from './Submit.js'; import Submit from './Submit.js';
import Results from './Results.js';
import ScrollToTop from './ScrollToTop.js'; import ScrollToTop from './ScrollToTop.js';
import Feed from './pages/Feed.js';
import Article from './pages/Article.js';
import Comments from './pages/Comments.js';
import Results from './pages/Results.js';
function App() {
const [theme, setTheme] = useState(localStorage.getItem('theme') || '');
const cache = useRef({});
const [isFullScreen, setIsFullScreen] = useState(!!document.fullscreenElement);
const updateCache = useCallback((key, value) => { class App extends React.Component {
cache.current[key] = value; constructor(props) {
}, []); super(props);
const light = () => { this.state = {
setTheme(''); theme: localStorage.getItem('theme') || '',
};
this.cache = {};
}
updateCache = (key, value) => {
this.cache[key] = value;
}
light() {
this.setState({ theme: '' });
localStorage.setItem('theme', ''); localStorage.setItem('theme', '');
}; }
const dark = () => { dark() {
setTheme('dark'); this.setState({ theme: 'dark' });
localStorage.setItem('theme', 'dark'); localStorage.setItem('theme', 'dark');
}; }
const black = () => { componentDidMount() {
setTheme('black'); if (!this.cache.length) {
localStorage.setItem('theme', 'black');
};
const red = () => {
setTheme('red');
localStorage.setItem('theme', 'red');
};
useEffect(() => {
if (Object.keys(cache.current).length === 0) {
localForage.iterate((value, key) => { localForage.iterate((value, key) => {
updateCache(key, value); this.updateCache(key, value);
}).then(() => {
console.log('loaded cache from localforage');
}); });
console.log('loaded cache from localforage');
} }
}, [updateCache]); }
const goFullScreen = () => { render() {
if ('wakeLock' in navigator) { const theme = this.state.theme;
navigator.wakeLock.request('screen'); document.body.style.backgroundColor = theme === 'dark' ? '#000' : '#eeeeee';
}
document.body.requestFullscreen({ navigationUI: 'hide' });
};
const exitFullScreen = () => { return (
document.exitFullscreen(); <div className={theme}>
}; <Router>
<div className='container menu'>
<p>
<Link to='/'>QotNews - Feed</Link>
<span className='theme'>Theme: <a href='#' onClick={() => this.light()}>Light</a> - <a href='#' onClick={() => this.dark()}>Dark</a></span>
<br />
<span className='slogan'>Reddit, Hacker News, and Tildes combined, then pre-rendered in reader mode.</span>
</p>
<Route path='/(|search)' component={Search} />
<Route path='/(|search)' component={Submit} />
</div>
useEffect(() => { <Route path='/' exact render={(props) => <Feed {...props} updateCache={this.updateCache} />} />
const onFullScreenChange = () => setIsFullScreen(!!document.fullscreenElement); <Switch>
document.addEventListener('fullscreenchange', onFullScreenChange); <Route path='/search' component={Results} />
return () => document.removeEventListener('fullscreenchange', onFullScreenChange); <Route path='/:id' exact render={(props) => <Article {...props} cache={this.cache} />} />
}, []); </Switch>
<Route path='/:id/c' exact render={(props) => <Comments {...props} cache={this.cache} />} />
useEffect(() => { <ForwardDot />
if (theme === 'dark') {
document.body.style.backgroundColor = '#1a1a1a';
} else if (theme === 'black') {
document.body.style.backgroundColor = '#000';
} else if (theme === 'red') {
document.body.style.backgroundColor = '#000';
} else {
document.body.style.backgroundColor = '#eeeeee';
}
}, [theme]);
const fullScreenAvailable = document.fullscreenEnabled || <ScrollToTop />
document.mozFullscreenEnabled || </Router>
document.webkitFullscreenEnabled || </div>
document.msFullscreenEnabled; );
}
return (
<div className={theme}>
<Router>
<div className='container menu'>
<p>
<Link to='/'>QotNews</Link>
<span className='theme'><a href='#' onClick={() => light()}>Light</a> - <a href='#' onClick={() => dark()}>Dark</a> - <a href='#' onClick={() => black()}>Black</a> - <a href='#' onClick={() => red()}>Red</a></span>
<br />
<span className='slogan'>Hacker News, Reddit, Lobsters, and Tildes articles rendered in reader mode.</span>
</p>
{fullScreenAvailable &&
<Route path='/(|search)' render={() => !isFullScreen ?
<button className='fullscreen' onClick={() => goFullScreen()}>Enter Fullscreen</button>
:
<button className='fullscreen' onClick={() => exitFullScreen()}>Exit Fullscreen</button>
} />
}
<Route path='/(|search)' component={Search} />
<Route path='/(|search)' component={Submit} />
</div>
<Route path='/' exact render={(props) => <Feed {...props} updateCache={updateCache} />} />
<Switch>
<Route path='/search' component={Results} />
<Route path='/:id' exact render={(props) => <Article {...props} cache={cache.current} />} />
</Switch>
<Route path='/:id/c' exact render={(props) => <Comments {...props} cache={cache.current} />} />
<BackwardDot />
<ForwardDot />
<ScrollToTop />
</Router>
</div>
);
} }
export default App; export default App;

View File

@@ -1,237 +0,0 @@
import React, { useState, useEffect } from 'react';
import { useParams } from 'react-router-dom';
import { Helmet } from 'react-helmet';
import localForage from 'localforage';
import { sourceLink, infoLine, ToggleDot } from './utils.js';
import Latex from 'react-latex-next';
import 'katex/dist/katex.min.css';
const VOID_ELEMENTS = ['area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'link', 'meta', 'param', 'source', 'track', 'wbr'];
const DANGEROUS_TAGS = ['svg', 'math'];
const latexDelimiters = [
{ left: '$$', right: '$$', display: true },
{ left: '\\[', right: '\\]', display: true },
{ left: '$', right: '$', display: false },
{ left: '\\(', right: '\\)', display: false }
];
function Article({ cache }) {
const { id } = useParams();
if (id in cache) console.log('cache hit');
const [story, setStory] = useState(cache[id] || false);
const [error, setError] = useState('');
const [pConv, setPConv] = useState([]);
const [copyButtonText, setCopyButtonText] = useState('\ue92c');
useEffect(() => {
localForage.getItem(id)
.then(
(value) => {
if (value) {
setStory(value);
}
}
);
fetch('/api/' + id)
.then(res => {
if (!res.ok) {
throw new Error(`Server responded with ${res.status} ${res.statusText}`);
}
return res.json();
})
.then(
(result) => {
setStory(result.story);
localForage.setItem(id, result.story);
},
(error) => {
const errorMessage = `Failed to fetch new article content (ID: ${id}). Your connection may be down or the server might be experiencing issues. ${error.toString()}.`;
setError(errorMessage);
}
);
}, [id]);
const copyLink = () => {
navigator.clipboard.writeText(`${story.title}:\n${window.location.href}`).then(() => {
setCopyButtonText('\uea10');
setTimeout(() => setCopyButtonText('\ue92c'), 2000);
}, () => {
setCopyButtonText('\uea0f');
setTimeout(() => setCopyButtonText('\ue92c'), 2000);
});
};
const pConvert = (n) => {
setPConv(prevPConv => [...prevPConv, n]);
};
const isCodeBlock = (v) => {
if (v.localName === 'pre') {
return true;
}
if (v.localName === 'code') {
if (v.closest('p')) {
return false;
}
const parent = v.parentElement;
if (parent) {
const nonWhitespaceChildren = Array.from(parent.childNodes).filter(n => {
return n.nodeType !== Node.TEXT_NODE || n.textContent.trim() !== '';
});
if (nonWhitespaceChildren.length === 1 && nonWhitespaceChildren[0] === v) {
return true;
}
}
}
return false;
};
const renderNodes = (nodes, keyPrefix = '') => {
return Array.from(nodes).map((v, k) => {
const key = `${keyPrefix}${k}`;
if (pConv.includes(key)) {
return (
<React.Fragment key={key}>
{v.textContent.split('\n\n').map((x, i) =>
<p key={i}>{x}</p>
)}
</React.Fragment>
);
}
if (v.nodeName === '#text') {
const text = v.data;
if (text.includes('\\[') || text.includes('\\(') || text.includes('$$') || /\$(?:[^$]*[^\s$])\$/.test(text)) {
return <Latex key={key} delimiters={latexDelimiters}>{text}</Latex>;
}
// Only wrap top-level text nodes in <p>
if (keyPrefix === '' && v.data.trim() !== '') {
return <p key={key}>{v.data}</p>;
}
return v.data;
}
if (v.nodeType !== Node.ELEMENT_NODE) {
return null;
}
if (DANGEROUS_TAGS.includes(v.localName)) {
return <span key={key} dangerouslySetInnerHTML={{ __html: v.outerHTML }} />;
}
const Tag = v.localName;
if (isCodeBlock(v)) {
return (
<React.Fragment key={key}>
<Tag dangerouslySetInnerHTML={{ __html: v.innerHTML }} />
<button onClick={() => pConvert(key)}>Convert Code to Paragraph</button>
</React.Fragment>
);
}
const textContent = v.textContent.trim();
const isMath = (textContent.startsWith('\\(') && textContent.endsWith('\\)')) ||
(textContent.startsWith('\\[') && textContent.endsWith('\\]')) ||
(textContent.startsWith('$$') && textContent.endsWith('$$')) ||
(textContent.startsWith('$') && textContent.endsWith('$') && textContent.indexOf('$') !== textContent.lastIndexOf('$') && !/\s/.test(textContent.charAt(textContent.length - 2)));
const props = { key: key };
if (v.hasAttributes()) {
for (const attr of v.attributes) {
const name = attr.name === 'class' ? 'className' : attr.name;
props[name] = attr.value;
}
}
if (isMath) {
let mathContent = v.textContent;
// align environment requires display math mode
if (mathContent.includes('\\begin{align')) {
const trimmed = mathContent.trim();
if (trimmed.startsWith('\\(')) {
// Replace \( and \) with \[ and \] to switch to display mode
const firstParen = mathContent.indexOf('\\(');
const lastParen = mathContent.lastIndexOf('\\)');
mathContent = mathContent.substring(0, firstParen) + '\\[' + mathContent.substring(firstParen + 2, lastParen) + '\\]' + mathContent.substring(lastParen + 2);
} else if (trimmed.startsWith('$') && !trimmed.startsWith('$$')) {
// Replace $ with $$
const firstDollar = mathContent.indexOf('$');
const lastDollar = mathContent.lastIndexOf('$');
if (firstDollar !== lastDollar) {
mathContent = mathContent.substring(0, firstDollar) + '$$' + mathContent.substring(firstDollar + 1, lastDollar) + '$$' + mathContent.substring(lastDollar + 1);
}
}
}
return <Tag {...props}><Latex delimiters={latexDelimiters}>{mathContent}</Latex></Tag>;
}
if (VOID_ELEMENTS.includes(Tag)) {
return <Tag {...props} />;
}
return (
<Tag {...props}>
{renderNodes(v.childNodes, `${key}-`)}
</Tag>
);
});
};
const nodes = (s) => {
if (s && s.text) {
let div = document.createElement('div');
div.innerHTML = s.text;
return div.childNodes;
}
return null;
};
const storyNodes = nodes(story);
return (
<div className='article-container'>
{error &&
<details style={{marginBottom: '1rem'}}>
<summary>Connection error? Click to expand.</summary>
<p>{error}</p>
{story && <p>Loaded article from cache.</p>}
</details>
}
{story ?
<div className='article'>
<Helmet>
<title>{story.title} | QotNews</title>
<meta name="robots" content="noindex" />
</Helmet>
<h1>{story.title} <button className='copy-button' onClick={copyLink}>{copyButtonText}</button></h1>
<div className='info'>
Source: {sourceLink(story)}
</div>
{infoLine(story)}
{storyNodes ?
<div className='story-text'>
{renderNodes(storyNodes)}
</div>
:
<p>Problem getting article :(</p>
}
</div>
:
<p>Loading...</p>
}
<ToggleDot id={id} article={false} />
</div>
);
}
export default Article;
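
The inline-to-display math conversion buried inside renderNodes above is easy to miss: KaTeX only renders \begin{align} in display mode, so \( ... \) is rewritten to \[ ... \] and single-dollar delimiters to double-dollar ones. A minimal standalone sketch of that same delimiter swap, under the assumption that the helper name toDisplayMath is illustrative and not part of the codebase:

```
// Sketch of the delimiter swap performed in the removed Article.js:
// promote inline math to display math when the content uses \begin{align}.
function toDisplayMath(mathContent) {
  if (!mathContent.includes('\\begin{align')) return mathContent;
  const trimmed = mathContent.trim();
  if (trimmed.startsWith('\\(')) {
    const first = mathContent.indexOf('\\(');
    const last = mathContent.lastIndexOf('\\)');
    return mathContent.slice(0, first) + '\\[' +
      mathContent.slice(first + 2, last) + '\\]' +
      mathContent.slice(last + 2);
  }
  if (trimmed.startsWith('$') && !trimmed.startsWith('$$')) {
    const first = mathContent.indexOf('$');
    const last = mathContent.lastIndexOf('$');
    if (first !== last) {
      return mathContent.slice(0, first) + '$$' +
        mathContent.slice(first + 1, last) + '$$' +
        mathContent.slice(last + 1);
    }
  }
  return mathContent;
}

// Example: '\\(\\begin{align}a&=b\\end{align}\\)'
// becomes  '\\[\\begin{align}a&=b\\end{align}\\]'
```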

View File

@@ -1,140 +0,0 @@
import React, { useState, useEffect } from 'react';
import { Link, useParams } from 'react-router-dom';
import { HashLink } from 'react-router-hash-link';
import { Helmet } from 'react-helmet';
import moment from 'moment';
import localForage from 'localforage';
import { infoLine, ToggleDot } from './utils.js';
function countComments(c) {
return c.comments.reduce((sum, x) => sum + countComments(x), 1);
}
function Comments({ cache }) {
const { id } = useParams();
if (id in cache) console.log('cache hit');
const [story, setStory] = useState(cache[id] || false);
const [error, setError] = useState('');
const [collapsed, setCollapsed] = useState([]);
const [expanded, setExpanded] = useState([]);
useEffect(() => {
localForage.getItem(id)
.then(
(value) => {
if (value) {
setStory(value);
}
}
);
fetch('/api/' + id)
.then(res => {
if (!res.ok) {
throw new Error(`Server responded with ${res.status} ${res.statusText}`);
}
return res.json();
})
.then(
(result) => {
setStory(result.story);
localForage.setItem(id, result.story);
const hash = window.location.hash.substring(1);
if (hash) {
setTimeout(() => {
const element = document.getElementById(hash);
if (element) {
element.scrollIntoView();
}
}, 0);
}
},
(error) => {
const errorMessage = `Failed to fetch comments (ID: ${id}). Your connection may be down or the server might be experiencing issues. ${error.toString()}.`;
setError(errorMessage);
}
);
}, [id]);
const collapseComment = (cid) => {
setCollapsed(prev => [...prev, cid]);
setExpanded(prev => prev.filter(x => x !== cid));
};
const expandComment = (cid) => {
setCollapsed(prev => prev.filter(x => x !== cid));
setExpanded(prev => [...prev, cid]);
};
const displayComment = (story, c, level) => {
const cid = c.author+c.date;
const isCollapsed = collapsed.includes(cid);
const isExpanded = expanded.includes(cid);
const hidden = isCollapsed || (level == 4 && !isExpanded);
const hasChildren = c.comments.length !== 0;
return (
<div className={level ? 'comment lined' : 'comment'} key={cid}>
<div className='info'>
<p>
{c.author === story.author ? '[OP]' : ''} {c.author || '[Deleted]'}
{' '} | <HashLink to={'#'+cid} id={cid}>{moment.unix(c.date).fromNow()}</HashLink>
{hidden || hasChildren &&
<button className='collapser pointer' onClick={() => collapseComment(cid)}></button>
}
</p>
</div>
<div className={isCollapsed ? 'text hidden' : 'text'} dangerouslySetInnerHTML={{ __html: c.text || '<p>[Empty / deleted comment]</p>'}} />
{hidden && hasChildren ?
<button className='comment lined info pointer' onClick={() => expandComment(cid)}>[show {countComments(c)-1} more]</button>
:
c.comments.map(i => displayComment(story, i, level + 1))
}
</div>
);
};
return (
<div className='container'>
{error &&
<details style={{marginBottom: '1rem'}}>
<summary>Connection error? Click to expand.</summary>
<p>{error}</p>
{story && <p>Loaded comments from cache.</p>}
</details>
}
{story ?
<div className='article'>
<Helmet>
<title>{story.title} | QotNews</title>
<meta name="robots" content="noindex" />
</Helmet>
<h1>{story.title}</h1>
<div className='info'>
<Link to={'/' + story.id}>View article</Link>
</div>
{infoLine(story)}
<div className='comments'>
{story.comments.map(c => displayComment(story, c, 0))}
</div>
</div>
:
<p>loading...</p>
}
<ToggleDot id={id} article={true} />
</div>
);
}
export default Comments;
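
For reference, countComments above counts a comment plus all of its descendants, which is why the "[show N more]" button prints countComments(c) - 1. A quick sketch of the expected behaviour on a hypothetical comment tree:

```
// countComments as defined in the removed Comments.js: each comment counts
// itself (the 1) plus the recursive totals of its replies.
function countComments(c) {
  return c.comments.reduce((sum, x) => sum + countComments(x), 1);
}

// Hypothetical tree: one top-level comment with two replies,
// one of which has a reply of its own.
const comment = {
  comments: [
    { comments: [] },
    { comments: [{ comments: [] }] },
  ],
};

console.log(countComments(comment)); // 4
```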

View File

@@ -1,159 +0,0 @@
import React, { useState, useEffect } from 'react';
import { Link } from 'react-router-dom';
import { Helmet } from 'react-helmet';
import localForage from 'localforage';
import { sourceLink, infoLine, logos } from './utils.js';
function Feed({ updateCache }) {
const [stories, setStories] = useState(() => JSON.parse(localStorage.getItem('stories')) || false);
const [error, setError] = useState('');
const [loadingStatus, setLoadingStatus] = useState(null);
const [filterSmallweb, setFilterSmallweb] = useState(() => localStorage.getItem('filterSmallweb') === 'true');
const handleFilterChange = e => {
const isChecked = e.target.checked;
setStories(false);
setFilterSmallweb(isChecked);
localStorage.setItem('filterSmallweb', isChecked);
};
useEffect(() => {
const controller = new AbortController();
fetch(filterSmallweb ? '/api?smallweb=true' : '/api', { signal: controller.signal })
.then(res => {
if (!res.ok) {
throw new Error(`Server responded with ${res.status} ${res.statusText}`);
}
return res.json();
})
.then(
async (result) => {
const newApiStories = result.stories;
const updated = !stories || !stories.length || stories[0].id !== newApiStories[0].id;
console.log('New stories available:', updated);
if (!updated) return;
setLoadingStatus({ current: 0, total: newApiStories.length });
let currentStories = Array.isArray(stories) ? [...stories] : [];
let preloadedCount = 0;
for (const [index, newStory] of newApiStories.entries()) {
if (controller.signal.aborted) {
break;
}
try {
const storyFetchController = new AbortController();
const timeoutId = setTimeout(() => storyFetchController.abort(), 10000); // 10-second timeout
const storyRes = await fetch('/api/' + newStory.id, { signal: storyFetchController.signal });
clearTimeout(timeoutId);
if (!storyRes.ok) {
throw new Error(`Server responded with ${storyRes.status} ${storyRes.statusText}`);
}
const storyResult = await storyRes.json();
const fullStory = storyResult.story;
await localForage.setItem(fullStory.id, fullStory);
console.log('Preloaded story:', fullStory.id, fullStory.title);
updateCache(fullStory.id, fullStory);
preloadedCount++;
setLoadingStatus({ current: preloadedCount, total: newApiStories.length });
const existingStoryIndex = currentStories.findIndex(s => s.id === newStory.id);
if (existingStoryIndex > -1) {
currentStories.splice(existingStoryIndex, 1);
}
currentStories.splice(index, 0, newStory);
localStorage.setItem('stories', JSON.stringify(currentStories));
setStories(currentStories);
} catch (error) {
let errorMessage;
if (error.name === 'AbortError') {
errorMessage = `The request to fetch story '${newStory.title}' (${newStory.id}) timed out after 10 seconds. Your connection may be unstable. (${preloadedCount} / ${newApiStories.length} stories preloaded)`;
console.log('Fetch timed out for story:', newStory.id);
} else {
errorMessage = `An error occurred while fetching story '${newStory.title}' (ID: ${newStory.id}): ${error.toString()}. (${preloadedCount} / ${newApiStories.length} stories preloaded)`;
console.log('Fetch failed for story:', newStory.id, error);
}
setError(errorMessage);
break;
}
}
const finalStories = currentStories.slice(0, newApiStories.length);
const removedStories = currentStories.slice(newApiStories.length);
for (const story of removedStories) {
console.log('Removed story:', story.id, story.title);
localForage.removeItem(story.id);
}
localStorage.setItem('stories', JSON.stringify(finalStories));
setStories(finalStories);
setLoadingStatus(null);
},
(error) => {
if (error.name === 'AbortError') {
console.log('Feed fetch aborted.');
return;
}
const errorMessage = `Failed to fetch the main story list from the API. Your connection may be down or the server might be experiencing issues. ${error.toString()}.`;
setError(errorMessage);
}
);
return () => controller.abort();
}, [updateCache, filterSmallweb]);
return (
<div className='container'>
<Helmet>
<title>QotNews</title>
<meta name="robots" content="index" />
</Helmet>
<div style={{marginBottom: '1rem'}}>
<input type="checkbox" id="filter-smallweb" className="checkbox" checked={filterSmallweb} onChange={handleFilterChange} />
<label htmlFor="filter-smallweb">Only Smallweb</label>
</div>
{error &&
<details style={{marginBottom: '1rem'}}>
<summary>Connection error? Click to expand.</summary>
<p>{error}</p>
{stories && <p>Loaded feed from cache.</p>}
</details>
}
{stories ?
<div>
{stories.map(x =>
<div className='item' key={x.id}>
<div className='title'>
<Link className='link' to={'/' + x.id}>
<img className='source-logo' src={logos[x.source]} alt='source logo' /> {x.title}
</Link>
<span className='source'>
({sourceLink(x)})
</span>
</div>
{infoLine(x)}
</div>
)}
</div>
:
<p>Loading...</p>
}
{loadingStatus && <p>Preloading stories {loadingStatus.current} / {loadingStatus.total}...</p>}
</div>
);
}
export default Feed;
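
The preload loop above guards each per-story fetch with a 10-second AbortController timeout. A minimal sketch of that pattern in isolation, assuming only the /api/<id> endpoint shown in the code above (the helper name and example id are illustrative):

```
// Sketch of the timeout pattern used in the removed Feed.js preload loop:
// abort the story fetch if it has not completed within 10 seconds.
async function fetchStoryWithTimeout(id, timeoutMs = 10000) {
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
  try {
    const res = await fetch('/api/' + id, { signal: controller.signal });
    if (!res.ok) {
      throw new Error(`Server responded with ${res.status} ${res.statusText}`);
    }
    return (await res.json()).story;
  } finally {
    clearTimeout(timeoutId); // always clear the timer, success or failure
  }
}

// Usage (illustrative id):
// fetchStoryWithTimeout('abc123').then(story => console.log(story.title));
```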

View File

@@ -1,73 +0,0 @@
import React, { useState, useEffect } from 'react';
import { Link, useLocation } from 'react-router-dom';
import { Helmet } from 'react-helmet';
import { sourceLink, infoLine, logos } from './utils.js';
import AbortController from 'abort-controller';
function Results() {
const [stories, setStories] = useState(false);
const [error, setError] = useState(false);
const location = useLocation();
useEffect(() => {
const controller = new AbortController();
const signal = controller.signal;
const search = location.search;
fetch('/api/search' + search, { method: 'get', signal: signal })
.then(res => res.json())
.then(
(result) => {
setStories(result.hits);
},
(error) => {
if (error.message !== 'The operation was aborted. ') {
setError(true);
}
}
);
return () => {
controller.abort();
};
}, [location.search]);
return (
<div className='container'>
<Helmet>
<title>Search Results | QotNews</title>
</Helmet>
{error && <p>Connection error?</p>}
{stories ?
<>
<p>Search results:</p>
<div className='comment lined'>
{stories.length ?
stories.map(x =>
<div className='item' key={x.id}>
<div className='title'>
<Link className='link' to={'/' + x.id}>
<img className='source-logo' src={logos[x.source]} alt='source logo' /> {x.title}
</Link>
<span className='source'>
({sourceLink(x)})
</span>
</div>
{infoLine(x)}
</div>
)
:
<p>none</p>
}
</div>
</>
:
<p>loading...</p>
}
</div>
);
}
export default Results;

View File

@@ -15,7 +15,6 @@ class ScrollToTop extends React.Component {
} }
window.scrollTo(0, 0); window.scrollTo(0, 0);
document.body.scrollTop = 0;
} }
render() { render() {

View File

@@ -1,46 +1,51 @@
import React, { useState, useRef } from 'react'; import React, { Component } from 'react';
import { useHistory, useLocation } from 'react-router-dom'; import { withRouter } from 'react-router-dom';
import queryString from 'query-string'; import queryString from 'query-string';
const getSearch = location => queryString.parse(location.search).q || ''; const getSearch = props => queryString.parse(props.location.search).q;
function Search() { class Search extends Component {
const history = useHistory(); constructor(props) {
const location = useLocation(); super(props);
const [search, setSearch] = useState(getSearch(location)); this.state = {search: getSearch(this.props)};
const inputRef = useRef(null); this.inputRef = React.createRef();
}
const searchArticles = (event) => { searchArticles = (event) => {
const newSearch = event.target.value; const search = event.target.value;
setSearch(newSearch); this.setState({search: search});
if (newSearch.length >= 3) { if (search.length >= 3) {
const searchQuery = queryString.stringify({ 'q': newSearch }); const searchQuery = queryString.stringify({ 'q': search });
history.replace('/search?' + searchQuery); this.props.history.replace('/search?' + searchQuery);
} else { } else {
history.replace('/'); this.props.history.replace('/');
} }
} }
const searchAgain = (event) => { searchAgain = (event) => {
event.preventDefault(); event.preventDefault();
const searchString = queryString.stringify({ 'q': event.target[0].value }); const searchString = queryString.stringify({ 'q': event.target[0].value });
history.push('/search?' + searchString); this.props.history.push('/search?' + searchString);
inputRef.current.blur(); this.inputRef.current.blur();
} }
return ( render() {
<span className='search'> const search = this.state.search;
<form onSubmit={searchAgain}>
<input return (
placeholder='Search...' <span className='search'>
value={search} <form onSubmit={this.searchAgain}>
onChange={searchArticles} <input
ref={inputRef} placeholder='Search... (fixed)'
/> value={search}
</form> onChange={this.searchArticles}
</span> ref={this.inputRef}
); />
</form>
</span>
);
}
} }
export default Search; export default withRouter(Search);

View File

@@ -1,77 +0,0 @@
.black {
color: #ddd;
}
.black a {
color: #ddd;
}
.black input {
color: #ddd;
border: 1px solid #828282;
}
.black .menu button,
.black .story-text button {
background-color: #444444;
border-color: #bbb;
color: #ddd;
}
.black .item {
color: #828282;
}
.black .item .source-logo {
filter: grayscale(1);
}
.black .item a {
color: #828282;
}
.black .item a.link {
color: #ddd;
}
.black .item a.link:visited {
color: #828282;
}
.black .item .info a.hot {
color: #cccccc;
}
.black .article a {
border-bottom: 1px solid #aaaaaa;
}
.black .article u {
border-bottom: 1px solid #aaaaaa;
text-decoration: none;
}
.black .story-text video,
.black .story-text img {
filter: brightness(50%);
}
.black .article .info {
color: #828282;
}
.black .article .info a {
border-bottom: none;
color: #828282;
}
.black .comment.lined {
border-left: 1px solid #444444;
}
.black .checkbox:checked + label::after {
border-color: #eee;
}
.black .copy-button {
color: #828282;
}

View File

@@ -11,17 +11,14 @@
border: 1px solid #828282; border: 1px solid #828282;
} }
.dark .menu button,
.dark .story-text button {
background-color: #444444;
border-color: #bbb;
color: #ddd;
}
.dark .item { .dark .item {
color: #828282; color: #828282;
} }
.dark .item .source-logo {
filter: grayscale(1);
}
.dark .item a { .dark .item a {
color: #828282; color: #828282;
} }
@@ -46,7 +43,6 @@
text-decoration: none; text-decoration: none;
} }
.dark .story-text video,
.dark .story-text img { .dark .story-text img {
filter: brightness(50%); filter: brightness(50%);
} }
@@ -63,11 +59,3 @@
.dark .comment.lined { .dark .comment.lined {
border-left: 1px solid #444444; border-left: 1px solid #444444;
} }
.dark .checkbox:checked + label::after {
border-color: #eee;
}
.dark .copy-button {
color: #828282;
}

View File

@@ -2,30 +2,9 @@ body {
text-rendering: optimizeLegibility; text-rendering: optimizeLegibility;
font: 1rem/1.3 sans-serif; font: 1rem/1.3 sans-serif;
color: #000000; color: #000000;
margin-bottom: 100vh;
word-break: break-word; word-break: break-word;
font-kerning: normal; font-kerning: normal;
margin: 0;
}
::backdrop {
background-color: rgba(0,0,0,0);
}
body:fullscreen {
overflow-y: scroll !important;
}
body:-ms-fullscreen {
overflow-y: scroll !important;
}
body:-webkit-full-screen {
overflow-y: scroll !important;
}
body:-moz-full-screen {
overflow-y: scroll !important;
}
#root {
margin: 8px 8px 100vh 8px !important;
} }
a { a {
@@ -43,21 +22,10 @@ input {
border-radius: 4px; border-radius: 4px;
} }
.fullscreen {
margin: 0.25rem;
padding: 0.25rem;
}
pre { pre {
overflow: auto; overflow: auto;
} }
.comments pre {
overflow: auto;
white-space: pre-wrap;
overflow-wrap: break-word;
}
.container { .container {
margin: 1rem auto; margin: 1rem auto;
max-width: 64rem; max-width: 64rem;
@@ -126,13 +94,6 @@ span.source {
border-bottom: 1px solid #222222; border-bottom: 1px solid #222222;
} }
.article-title {
display: flex;
align-items: center;
margin-top: 0.67em;
margin-bottom: 0.67em;
}
.article h1 { .article h1 {
font-size: 1.6rem; font-size: 1.6rem;
} }
@@ -189,13 +150,6 @@ span.source {
.comments { .comments {
margin-left: -1.25rem; margin-left: -1.25rem;
margin-top: 0;
margin-bottom: 0;
padding: 0;
}
.comments dl, .comments dd {
margin: 0;
} }
.comment { .comment {
@@ -208,11 +162,6 @@ span.source {
.comment .text { .comment .text {
margin-top: -0.5rem; margin-top: -0.5rem;
margin-bottom: 1rem;
}
.comment .text > * {
margin-bottom: 0;
} }
.comment .text.hidden > p { .comment .text.hidden > p {
@@ -232,49 +181,20 @@ span.source {
padding-right: 1.5rem; padding-right: 1.5rem;
} }
button.collapser {
background: transparent;
border: none;
margin: 0;
padding-top: 0;
padding-bottom: 0;
font: inherit;
color: inherit;
}
button.comment {
background: transparent;
border-top: none;
border-right: none;
border-bottom: none;
margin: 0;
padding-top: 0;
padding-right: 0;
padding-bottom: 0;
font: inherit;
color: inherit;
text-align: left;
width: 100%;
}
.comment .pointer { .comment .pointer {
cursor: pointer; cursor: pointer;
} }
.dot { .toggleDot {
cursor: pointer;
position: fixed; position: fixed;
bottom: 1rem;
left: 1rem;
height: 3rem; height: 3rem;
width: 3rem; width: 3rem;
background-color: #828282; background-color: #828282;
border-radius: 50%; border-radius: 50%;
} }
.toggleDot {
bottom: 1rem;
left: 1rem;
}
.toggleDot .button { .toggleDot .button {
font: 2rem/1 'icomoon'; font: 2rem/1 'icomoon';
position: relative; position: relative;
@@ -283,110 +203,23 @@ button.comment {
} }
.forwardDot { .forwardDot {
cursor: pointer;
position: fixed;
bottom: 1rem; bottom: 1rem;
right: 1rem; right: 1rem;
height: 3rem;
width: 3rem;
background-color: #828282;
border-radius: 50%;
} }
.forwardDot .button { .forwardDot .button {
font: 2rem/1 'icomoon'; font: 2.5rem/1 'icomoon';
position: relative; position: relative;
top: 0.5rem; top: 0.25rem;
left: 0.5rem; left: 0.3rem;
}
.backwardDot {
bottom: 1rem;
right: 5rem;
}
.backwardDot .button {
font: 2rem/1 'icomoon';
position: relative;
top: 0.5rem;
left: 0.5rem;
} }
.search form { .search form {
display: inline; display: inline;
} }
.copy-button {
font: 1.5rem/1 'icomoon2';
color: #828282;
background: transparent;
border: none;
cursor: pointer;
vertical-align: middle;
}
.checkbox {
-webkit-appearance: none;
appearance: none;
position: absolute;
opacity: 0;
cursor: pointer;
height: 0;
width: 0;
}
.checkbox + label {
position: relative;
cursor: pointer;
padding-left: 1.75rem;
user-select: none;
}
.checkbox + label::before {
content: '';
position: absolute;
left: 0;
top: 0.1em;
width: 1rem;
height: 1rem;
border: 1px solid #828282;
background-color: transparent;
border-radius: 3px;
}
.checkbox:checked + label::after {
content: "";
position: absolute;
left: 0.35rem;
top: 0.2em;
width: 0.3rem;
height: 0.6rem;
border-style: solid;
border-color: #000;
border-width: 0 2px 2px 0;
transform: rotate(45deg);
}
.tooltip .tooltiptext {
visibility: hidden;
width: 140px;
background-color: #555;
color: #fff;
text-align: center;
border-radius: 6px;
padding: 5px 0;
position: absolute;
z-index: 1;
bottom: 110%;
left: 50%;
margin-left: -70px;
opacity: 0;
transition: opacity 0.2s;
font-size: 0.9rem;
line-height: 1.3;
}
.forwardDot .tooltiptext {
left: auto;
right: 0;
margin-left: 0;
}
.tooltip.show-tooltip .tooltiptext {
visibility: visible;
opacity: 1;
}

View File

@@ -1,95 +0,0 @@
.red {
color: #b00;
scrollbar-color: #b00 #440000;
}
.red a {
color: #b00;
}
.red input {
color: #b00;
border: 1px solid #690000;
}
.red input::placeholder {
color: #690000;
}
.red hr {
background-color: #690000;
}
.red .menu button,
.red .story-text button {
background-color: #440000;
border-color: #b00;
color: #b00;
}
.red .item,
.red .slogan {
color: #690000;
}
.red .item .source-logo {
display: none;
}
.red .item a {
color: #690000;
}
.red .item a.link {
color: #b00;
}
.red .item a.link:visited {
color: #690000;
}
.red .item .info a.hot {
color: #cc0000;
}
.red .article a {
border-bottom: 1px solid #aa0000;
}
.red .article u {
border-bottom: 1px solid #aa0000;
text-decoration: none;
}
.red .story-text video,
.red .story-text img {
filter: grayscale(100%) brightness(20%) sepia(100%) hue-rotate(-50deg) saturate(600%) contrast(0.8);
}
.red .article .info {
color: #690000;
}
.red .article .info a {
border-bottom: none;
color: #690000;
}
.red .comment.lined {
border-left: 1px solid #440000;
}
.red .dot {
background-color: #440000;
}
.red .checkbox + label::before {
border: 1px solid #690000;
}
.red .checkbox:checked + label::after {
border-color: #dd0000;
}
.red .copy-button {
color: #690000;
}

View File

@@ -1,53 +1,54 @@
import React, { useState, useRef } from 'react'; import React, { Component } from 'react';
import { useHistory } from 'react-router-dom'; import { withRouter } from 'react-router-dom';
function Submit() { class Submit extends Component {
const [progress, setProgress] = useState(null); constructor(props) {
const inputRef = useRef(null); super(props);
const history = useHistory();
const submitArticle = async (event) => { this.state = {
progress: null,
};
this.inputRef = React.createRef();
}
submitArticle = (event) => {
event.preventDefault(); event.preventDefault();
const url = event.target[0].value; const url = event.target[0].value;
inputRef.current.blur(); this.inputRef.current.blur();
setProgress('Submitting...'); this.setState({ progress: 'Submitting...' });
let data = new FormData(); let data = new FormData();
data.append('url', url); data.append('url', url);
try { fetch('/api/submit', { method: 'POST', body: data })
const res = await fetch('/api/submit', { method: 'POST', body: data }); .then(res => res.json())
.then(
if (res.ok) { (result) => {
const result = await res.json(); this.props.history.replace('/' + result.nid);
history.replace('/' + result.nid); },
} else { (error) => {
let errorData; this.setState({ progress: 'Error' });
try {
errorData = await res.json();
} catch (jsonError) {
// Not a JSON error from our API, so it's a server issue
throw new Error(`Server responded with ${res.status} ${res.statusText}`);
} }
setProgress(errorData.error || 'An unknown error occurred.'); );
}
} catch (error) {
setProgress(`Error: ${error.toString()}`);
}
} }
return ( render() {
<span className='search'> const progress = this.state.progress;
<form onSubmit={submitArticle}>
<input return (
placeholder='Submit URL' <span className='search'>
ref={inputRef} <form onSubmit={this.submitArticle}>
/> <input
</form> placeholder='Submit Article'
{progress && <p>{progress}</p>} ref={this.inputRef}
</span> />
); </form>
{progress ? progress : ''}
</span>
);
}
} }
export default Submit; export default withRouter(Submit);
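
Both versions of Submit above post the article URL as multipart form data to /api/submit and redirect to the returned story id. A minimal sketch of that request outside React, assuming only the endpoint and the nid response field shown in the code above:

```
// Sketch of the submit request made by Submit.js: POST the URL as FormData
// to /api/submit and read the new story id from the JSON response.
async function submitUrl(url) {
  const data = new FormData();
  data.append('url', url);
  const res = await fetch('/api/submit', { method: 'POST', body: data });
  if (!res.ok) {
    throw new Error(`Server responded with ${res.status} ${res.statusText}`);
  }
  const result = await res.json();
  return result.nid; // story id used for the '/' + nid redirect above
}
```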

View File

@@ -0,0 +1,34 @@
import React from "react";
import { Link } from "react-router-dom";
import { sourceLink, infoLine, getLogoUrl } from "../utils.js";
export class StoryItem extends React.Component {
constructor(props) {
super(props);
}
render() {
const story = this.props.story;
const { id, title } = story;
return (
<div className="item" key={id}>
<div className="title">
<Link className="link" to={"/" + id}>
<img
className="source-logo"
src={getLogoUrl(story)}
alt="source logo"
/>
{" "}
{title}
</Link>
<span className="source">({sourceLink(story)})</span>
</div>
{infoLine(story)}
</div>
);
}
}

View File

@@ -26,8 +26,3 @@
font-family: 'Icomoon'; font-family: 'Icomoon';
src: url('icomoon.ttf') format('truetype'); src: url('icomoon.ttf') format('truetype');
} }
@font-face {
font-family: 'Icomoon2';
src: url('icomoon2.ttf') format('truetype');
}

Binary file not shown.

Binary file not shown.

View File

@@ -8,4 +8,4 @@ ReactDOM.render(<App />, document.getElementById('root'));
// If you want your app to work offline and load faster, you can change // If you want your app to work offline and load faster, you can change
// // unregister() to register() below. Note this comes with some pitfalls. // // unregister() to register() below. Note this comes with some pitfalls.
// // Learn more about service workers: https://bit.ly/CRA-PWA // // Learn more about service workers: https://bit.ly/CRA-PWA
serviceWorker.unregister(); serviceWorker.register();

View File

@@ -0,0 +1,112 @@
import React from 'react';
import { Helmet } from 'react-helmet';
import localForage from 'localforage';
import { sourceLink, infoLine, ToggleDot } from '../utils.js';
class Article extends React.Component {
constructor(props) {
super(props);
const id = this.props.match ? this.props.match.params.id : 'CLOL';
const cache = this.props.cache;
if (id in cache) console.log('cache hit');
this.state = {
story: cache[id] || false,
error: false,
pConv: [],
};
}
componentDidMount() {
const id = this.props.match ? this.props.match.params.id : 'CLOL';
localForage.getItem(id)
.then(
(value) => {
if (value) {
this.setState({ story: value });
}
}
);
fetch('/api/' + id)
.then(res => res.json())
.then(
(result) => {
this.setState({ story: result.story });
localForage.setItem(id, result.story);
},
(error) => {
this.setState({ error: true });
}
);
}
pConvert = (n) => {
this.setState({ pConv: [...this.state.pConv, n] });
}
render() {
const id = this.props.match ? this.props.match.params.id : 'CLOL';
const story = this.state.story;
const error = this.state.error;
const pConv = this.state.pConv;
let nodes = null;
if (story.text) {
let div = document.createElement('div');
div.innerHTML = story.text;
nodes = div.childNodes;
}
return (
<div className='article-container'>
{error && <p>Connection error?</p>}
{story ?
<div className='article'>
<Helmet>
<title>{story.title} - QotNews</title>
</Helmet>
<h1>{story.title}</h1>
<div className='info'>
Source: {sourceLink(story)}
</div>
{infoLine(story)}
{nodes ?
<div className='story-text'>
{Object.entries(nodes).map(([k, v]) =>
pConv.includes(k) ?
v.innerHTML.split('\n\n').map(x =>
<p dangerouslySetInnerHTML={{ __html: x }} />
)
:
(v.nodeName === '#text' ?
<p>{v.data}</p>
:
<>
<v.localName dangerouslySetInnerHTML={v.innerHTML ? { __html: v.innerHTML } : null} />
{v.localName == 'pre' && <button onClick={() => this.pConvert(k)}>Convert Code to Paragraph</button>}
</>
)
)}
</div>
:
<p>Problem getting article :(</p>
}
</div>
:
<p>loading...</p>
}
<ToggleDot id={id} article={false} />
</div>
);
}
}
export default Article;

View File

@@ -0,0 +1,145 @@
import React from 'react';
import { Link } from 'react-router-dom';
import { HashLink } from 'react-router-hash-link';
import { Helmet } from 'react-helmet';
import moment from 'moment';
import localForage from 'localforage';
import { infoLine, ToggleDot } from '../utils.js';
class Article extends React.Component {
constructor(props) {
super(props);
const id = this.props.match.params.id;
const cache = this.props.cache;
if (id in cache) console.log('cache hit');
this.state = {
story: cache[id] || false,
error: false,
collapsed: [],
expanded: [],
};
}
componentDidMount() {
const id = this.props.match.params.id;
localForage.getItem(id)
.then(
(value) => {
this.setState({ story: value });
}
);
fetch('/api/' + id)
.then(res => res.json())
.then(
(result) => {
this.setState({ story: result.story }, () => {
const hash = window.location.hash.substring(1);
if (hash) {
document.getElementById(hash).scrollIntoView();
}
});
localForage.setItem(id, result.story);
},
(error) => {
this.setState({ error: true });
}
);
}
collapseComment(cid) {
this.setState(prevState => ({
...prevState,
collapsed: [...prevState.collapsed, cid],
expanded: prevState.expanded.filter(x => x !== cid),
}));
}
expandComment(cid) {
this.setState(prevState => ({
...prevState,
collapsed: prevState.collapsed.filter(x => x !== cid),
expanded: [...prevState.expanded, cid],
}));
}
countComments(c) {
return c.comments.reduce((sum, x) => sum + this.countComments(x), 1);
}
displayComment(story, c, level) {
const cid = c.author + c.date;
const collapsed = this.state.collapsed.includes(cid);
const expanded = this.state.expanded.includes(cid);
const hidden = collapsed || (level == 4 && !expanded);
const hasChildren = c.comments.length !== 0;
return (
<div className={level ? 'comment lined' : 'comment'} key={cid}>
<div className='info'>
<p>
{c.author === story.author ? '[OP]' : ''} {c.author || '[Deleted]'}
{' '} | <HashLink to={'#' + cid} id={cid}>{moment.unix(c.date).fromNow()}</HashLink>
{hasChildren && (
hidden ?
<span className='collapser expander pointer' onClick={() => this.expandComment(cid)}>+</span>
:
<span className='collapser pointer' onClick={() => this.collapseComment(cid)}></span>
)}
</p>
</div>
<div className={collapsed ? 'text hidden' : 'text'} dangerouslySetInnerHTML={{ __html: c.text }} />
{hidden && hasChildren ?
<div className='comment lined info pointer' onClick={() => this.expandComment(cid)}>[show {this.countComments(c) - 1} more]</div>
:
c.comments.map(i => this.displayComment(story, i, level + 1))
}
</div>
);
}
render() {
const id = this.props.match.params.id;
const story = this.state.story;
const error = this.state.error;
return (
<div className='container'>
{error && <p>Connection error?</p>}
{story ?
<div className='article'>
<Helmet>
<title>{story.title} - QotNews Comments</title>
</Helmet>
<h1>{story.title}</h1>
<div className='info'>
<Link to={'/' + story.id}>View article</Link>
</div>
{infoLine(story)}
<div className='comments'>
{story.comments.map(c => this.displayComment(story, c, 0))}
</div>
</div>
:
<p>loading...</p>
}
<ToggleDot id={id} article={true} />
</div>
);
}
}
export default Article;

View File

@@ -0,0 +1,64 @@
import React from 'react';
import { Helmet } from 'react-helmet';
import localForage from 'localforage';
import { StoryItem } from '../components/StoryItem.js';
class Feed extends React.Component {
constructor(props) {
super(props);
this.state = {
stories: JSON.parse(localStorage.getItem('stories')) || false,
error: false,
};
}
componentDidMount() {
fetch('/api')
.then(res => res.json())
.then(
(result) => {
const updated = !this.state.stories || this.state.stories[0].id !== result.stories[0].id;
console.log('updated:', updated);
const { stories } = result;
this.setState({ stories });
localStorage.setItem('stories', JSON.stringify(stories));
if (updated) {
localForage.clear();
stories.forEach((x, i) => {
fetch('/api/' + x.id)
.then(res => res.json())
.then(({ story }) => {
localForage.setItem(x.id, story)
.then(console.log('preloaded', x.id, x.title));
this.props.updateCache(x.id, story);
}, error => { }
);
});
}
},
(error) => {
this.setState({ error: true });
}
);
}
render() {
const stories = this.state.stories;
const error = this.state.error;
return (
<div className='container'>
<Helmet>
<title>Feed - QotNews</title>
</Helmet>
{error && <p>Connection error?</p>}
{stories ? stories.map(story => <StoryItem story={story}></StoryItem>) : <p>loading...</p>}
</div>
);
}
}
export default Feed;

View File

@@ -0,0 +1,76 @@
import React from 'react';
import { Helmet } from 'react-helmet';
import AbortController from 'abort-controller';
import { StoryItem } from '../components/StoryItem.js';
class Results extends React.Component {
constructor(props) {
super(props);
this.state = {
stories: false,
error: false,
};
this.controller = null;
}
performSearch = () => {
if (this.controller) {
this.controller.abort();
}
this.controller = new AbortController();
const signal = this.controller.signal;
const search = this.props.location.search;
fetch('/api/search' + search, { method: 'get', signal: signal })
.then(res => res.json())
.then(
(result) => {
this.setState({ stories: result.results });
},
(error) => {
if (error.message !== 'The operation was aborted. ') {
this.setState({ error: true });
}
}
);
}
componentDidMount() {
this.performSearch();
}
componentDidUpdate(prevProps) {
if (this.props.location.search !== prevProps.location.search) {
this.performSearch();
}
}
render() {
const stories = this.state.stories;
const error = this.state.error;
return (
<div className='container'>
<Helmet>
<title>Feed - QotNews</title>
</Helmet>
{error && <p>Connection error?</p>}
{stories ?
<>
<p>Search results:</p>
<div className='comment lined'>
{stories ? stories.map(story => <StoryItem story={story}></StoryItem>) : <p>loading...</p>}
</div>
</>
:
<p>loading...</p>
}
</div>
);
}
}
export default Results;

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large