Move archive to Whoosh and add search

2019-10-12 05:32:17 +00:00
parent 45b75b420b
commit 7cb87b59fe
14 changed files with 328 additions and 63 deletions

.gitignore

@@ -107,3 +107,5 @@ db.sqlite3
 praw.ini
 data.db
 data.db.bak
+data/archive/*

apiserver/archive.py Normal file

@@ -0,0 +1,52 @@
import os

from whoosh.analysis import StemmingAnalyzer, CharsetFilter, NgramFilter
from whoosh.index import create_in, open_dir, exists_in
from whoosh.fields import Schema, TEXT, ID, STORED
from whoosh.qparser import QueryParser
from whoosh.support.charset import accent_map

# Stem words, fold accented characters to ASCII, then emit 3-grams so
# partial terms in a query still match indexed titles.
analyzer = StemmingAnalyzer() | CharsetFilter(accent_map) | NgramFilter(minsize=3)

title_field = TEXT(analyzer=analyzer, stored=True)
id_field = ID(unique=True, stored=True)

schema = Schema(
    id=id_field,
    title=title_field,
    story=STORED,
)

ARCHIVE_LOCATION = 'data/archive'

ix = None

def init():
    global ix
    if exists_in(ARCHIVE_LOCATION):
        ix = open_dir(ARCHIVE_LOCATION)
    else:
        # create_in() does not create the directory itself
        os.makedirs(ARCHIVE_LOCATION, exist_ok=True)
        ix = create_in(ARCHIVE_LOCATION, schema)

def update(story):
    writer = ix.writer()
    # replaces any existing document with the same unique id
    writer.update_document(
        id=story['id'],
        title=story['title'],
        story=story,
    )
    writer.commit()

def get_story(id):
    with ix.searcher() as searcher:
        result = searcher.document(id=id)
        return result['story'] if result else None

def search(q):
    with ix.searcher() as searcher:
        query = QueryParser('title', ix.schema).parse(q)
        results = searcher.search(query)
        stories = [r['story'] for r in results]
        for s in stories:
            # strip the heavy fields from search results
            s.pop('text', '')
            s.pop('comments', '')
        return stories
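
Nothing in the commit exercises archive.py directly, so here is a minimal smoke test of the new module (a sketch: the story dict and its 'abc123' id are made up, and it assumes the working directory is apiserver/ so that data/archive resolves):

import archive

archive.init()

story = {
    'id': 'abc123',                        # hypothetical id
    'title': 'Café story about Whoosh',
    'text': 'full text, stripped from search results',
    'comments': [],
}
archive.update(story)

# lookup by unique id returns the STORED story dict
print(archive.get_story('abc123')['title'])

# the ngram filter indexes 3-grams, so the partial term 'caf' matches,
# after the accent filter folds 'Café' down to 'cafe'
print([s['id'] for s in archive.search('caf')])

Note that NgramFilter(minsize=3) with no maxsize emits only 3-grams (Whoosh defaults maxsize to minsize), so queries are effectively matched on overlapping trigrams of the stemmed, accent-folded title.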


@@ -0,0 +1,26 @@
import shelve

import archive

archive.init()

# One-off backfill: copy stories out of the old shelve databases into the
# Whoosh archive. Kept commented out so it doesn't re-run on import.

#with shelve.open('data/data') as db:
#    to_delete = []
#
#    for s in db.values():
#        if 'title' in s:
#            archive.update(s)
#        if 'id' in s:
#            to_delete.append(s['id'])
#
#    for id in to_delete:
#        del db[id]
#
#    for s in db['news_cache'].values():
#        if 'title' in s:
#            archive.update(s)

#with shelve.open('data/whoosh') as db:
#    for s in db['news_cache'].values():
#        if 'title' in s and not archive.get_story(s['id']):
#            archive.update(s)
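
After running one of these blocks, the backfill can be sanity-checked from a REPL (a sketch; 'some-id' is a hypothetical id from the old data, and doc_count() is Whoosh's count of undeleted documents in the index):

import archive

archive.init()
print(archive.ix.doc_count())  # documents now in the Whoosh index

# update_document() replaces documents that share the same unique id,
# so re-running a migration block cannot create duplicates
s = archive.get_story('some-id')  # hypothetical id
if s:
    archive.update(s)
print(archive.ix.doc_count())  # unchanged

The remaining hunks wire the archive into the Flask API server.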


@@ -9,6 +9,7 @@ import time
 import shelve
 from urllib.parse import urlparse
+import archive
 import feed
 from utils import gen_rand_id
@@ -16,6 +17,8 @@ from flask import abort, Flask, request, render_template
 from werkzeug.exceptions import NotFound
 from flask_cors import CORS
 
+archive.init()
+
 CACHE_LENGTH = 300
 DATA_FILE = 'data/data'
@@ -29,11 +32,9 @@ with shelve.open(DATA_FILE) as db:
 
 def get_story(id):
     if id in news_cache:
-        return {'story': news_cache[id]}
-    with shelve.open(DATA_FILE) as db:
-        if id in db:
-            return {'story': db[id]}
-    return None
+        return news_cache[id]
+    else:
+        return archive.get_story(id)
 
 build_folder = '../webclient/build'
 flask_app = Flask(__name__, template_folder=build_folder, static_folder=build_folder, static_url_path='')
@@ -42,15 +43,26 @@ cors = CORS(flask_app)
 @flask_app.route('/api')
 def api():
     front_page = [news_cache[news_ref_to_id[ref]] for ref in news_list]
-    front_page = [copy.copy(x) for x in front_page if 'text' in x and x['text']][:100]
-    for story in front_page:
-        if 'comments' in story: story.pop('comments')
-        if 'text' in story: story.pop('text')
+    front_page = [x for x in front_page if 'title' in x and x['title']]
+    front_page = front_page[:100]
+    to_remove = ['text', 'comments']
+    front_page = [{k:v for k,v in s.items() if k not in to_remove} for s in front_page]
     return {'stories': front_page}
 
+@flask_app.route('/api/search', strict_slashes=False)
+def search():
+    search = request.args.get('q', '')
+    if len(search) >= 3:
+        res = archive.search(search)
+    else:
+        res = []
+    return {'results': res}
+
 @flask_app.route('/api/<id>')
 def story(id):
-    return get_story(id) or abort(404)
+    story = get_story(id)
+    return dict(story=story) if story else abort(404)
 
 @flask_app.route('/')
 def index():
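
With the server running, the two read paths can be exercised over HTTP (a sketch using only the standard library; the localhost:5000 address and the story id are assumptions, not values from the commit):

import json
from urllib.request import urlopen

BASE = 'http://localhost:5000'  # assumed dev address

# title search; the server returns an empty list for queries under 3 chars
with urlopen(BASE + '/api/search?q=whoosh') as r:
    print(json.load(r)['results'])

# story lookup; served from news_cache if hot, otherwise from the archive
with urlopen(BASE + '/api/abc123') as r:  # hypothetical story id
    print(json.load(r)['story']['title'])

A miss on /api/<id> now aborts with 404 after checking both the in-memory cache and the Whoosh archive, replacing the old shelve fallback.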
@@ -68,10 +80,7 @@ def static_story(id):
     pass
 
     story = get_story(id)
-    if story:
-        story = story['story']
-    else:
-        return abort(404)
+    if not story: return abort(404)
 
     score = story['score']
     num_comments = story['num_comments']
@@ -94,23 +103,20 @@ web_thread.start()
 def new_id():
     nid = gen_rand_id()
-    with shelve.open(DATA_FILE) as db:
-        while nid in news_cache or nid in db:
-            nid = gen_rand_id()
+    while nid in news_cache or archive.get_story(nid):
+        nid = gen_rand_id()
     return nid
 
-def remove_ref(old_ref, archive=False):
+def remove_ref(old_ref):
     while old_ref in news_list:
         news_list.remove(old_ref)
     old_story = news_cache.pop(news_ref_to_id[old_ref])
     old_id = news_ref_to_id.pop(old_ref)
     logging.info('Removed ref {} id {}.'.format(old_ref, old_id))
-    if archive:
-        with shelve.open(DATA_FILE) as db:
-            db[old_id] = old_story
 
 try:
     while True:
         # onboard new stories
         if news_index == 0:
             feed_list = feed.list()
             new_items = [(ref, source) for ref, source in feed_list if ref not in news_list]
@@ -123,16 +129,20 @@ try:
         if len(new_items):
             logging.info('Added {} new refs.'.format(len(new_items)))
 
         # drop old ones
         while len(news_list) > CACHE_LENGTH:
             old_ref = news_list[-1]
-            remove_ref(old_ref, archive=True)
+            remove_ref(old_ref)
 
         # update current stories
         if news_index < len(news_list):
             update_ref = news_list[news_index]
             update_id = news_ref_to_id[update_ref]
             news_story = news_cache[update_id]
             valid = feed.update_story(news_story)
-            if not valid:
+            if valid:
+                archive.update(news_story)
+            else:
                 remove_ref(update_ref)
 
         time.sleep(3)