diff --git a/.gitmodules b/.gitmodules
index b25f9f9..be8a66a 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,3 @@
-[submodule "readerserver/scraper/browser/scripts/bypass-paywalls-chrome"]
-	path = readerserver/scraper/browser/scripts/bypass-paywalls-chrome
-	url = https://github.com/iamadamdev/bypass-paywalls-chrome.git
+[submodule "readerserver/scraper/headless/scripts/bypass-paywalls-chrome"]
+	path = readerserver/scraper/headless/scripts/bypass-paywalls-chrome
+	url = https://github.com/iamadamdev/bypass-paywalls-chrome/
diff --git a/apiserver/feed.py b/apiserver/feed.py
index 2ce9f1b..1e8c6eb 100644
--- a/apiserver/feed.py
+++ b/apiserver/feed.py
@@ -12,7 +12,7 @@ import settings
 from feeds import hackernews, reddit, tildes, substack, manual
 from feeds.sitemap import Sitemap
 from feeds.category import Category
-from scrapers import outline, declutter, browser, local
+from scrapers import outline, declutter, headless, simple
 
 INVALID_DOMAINS = ['youtube.com', 'bloomberg.com', 'wsj.com']
 
@@ -63,14 +63,14 @@ def get_list():
 
 def get_article(url):
     scrapers = {
-        'declutter': declutter,
+        'headless': headless,
+        'simple': simple,
         'outline': outline,
-        'browser': browser,
-        'local': local,
+        'declutter': declutter,
     }
-    available = settings.SCRAPERS or ['local']
-    if 'local' not in available:
-        available += ['local']
+    available = settings.SCRAPERS or ['headless', 'simple']
+    if 'simple' not in available:
+        available += ['simple']
 
     for scraper in available:
         if scraper not in scrapers.keys():
diff --git a/apiserver/scrapers/browser.py b/apiserver/scrapers/headless.py
similarity index 81%
rename from apiserver/scrapers/browser.py
rename to apiserver/scrapers/headless.py
index 3de7dd0..30639c3 100644
--- a/apiserver/scrapers/browser.py
+++ b/apiserver/scrapers/headless.py
@@ -4,13 +4,13 @@ logging.basicConfig(
         level=logging.DEBUG)
 import requests
 
-READ_API = 'http://127.0.0.1:33843/browser/details'
-READ_COMMENT__API = 'http://127.0.0.1:33843/browser/commentd'
+READ_API = 'http://127.0.0.1:33843/headless/details'
+READ_COMMENT__API = 'http://127.0.0.1:33843/headless/comments'
 TIMEOUT = 60
 
 def get_html(url):
-    logging.info(f"Reader Scraper: {url}")
+    logging.info(f"Headless Browser Scraper: {url}")
     details = get_details(url)
     if not details:
         return ''
@@ -25,7 +25,7 @@ def get_details(url):
     except KeyboardInterrupt:
         raise
     except BaseException as e:
-        logging.error('Problem Scraping article: {}'.format(str(e)))
+        logging.error('Problem scraping article: {}'.format(str(e)))
     return None
 
 def get_comments(url):
diff --git a/apiserver/scrapers/local.py b/apiserver/scrapers/simple.py
similarity index 85%
rename from apiserver/scrapers/local.py
rename to apiserver/scrapers/simple.py
index dd81f93..6613bf0 100644
--- a/apiserver/scrapers/local.py
+++ b/apiserver/scrapers/simple.py
@@ -4,11 +4,11 @@ logging.basicConfig(
         level=logging.DEBUG)
 import requests
 
-READ_API = 'http://127.0.0.1:33843/details'
+READ_API = 'http://127.0.0.1:33843/simple/details'
 TIMEOUT = 20
 
 def get_html(url):
-    logging.info(f"Local Scraper: {url}")
+    logging.info(f"Simple Scraper: {url}")
     details = get_details(url)
     if not details:
         return ''
diff --git a/apiserver/settings.py.example b/apiserver/settings.py.example
index 87d608d..797d6ba 100644
--- a/apiserver/settings.py.example
+++ b/apiserver/settings.py.example
@@ -51,7 +51,7 @@ CATEGORY = {}
 #     ],
 # }
 
-SCRAPERS = ['browser', 'declutter', 'outline', 'local']
+SCRAPERS = ['headless', 'outline', 'declutter', 'simple']
 
 # Reddit account info
 # leave blank if not using Reddit
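For context on what the feed.py hunk encodes: `get_article` walks the configured scrapers in order and now keeps `simple` (formerly `local`) as the guaranteed last resort. A minimal sketch of that resolution loop, assuming each scraper module exposes `get_html(url)` returning HTML or an empty string; the loop body past `scrapers.keys()` is cut off in the hunk, so the `try`/`except` shape below is an illustration, not the repo's exact code:

```python
import settings
from scrapers import outline, declutter, headless, simple

def get_article(url):
    scrapers = {
        'headless': headless,
        'simple': simple,
        'outline': outline,
        'declutter': declutter,
    }
    # Default order when settings.SCRAPERS is unset; 'simple' is
    # always appended so there is a guaranteed fallback.
    available = settings.SCRAPERS or ['headless', 'simple']
    if 'simple' not in available:
        available += ['simple']

    for scraper in available:
        if scraper not in scrapers.keys():
            continue  # ignore unknown names in settings.SCRAPERS
        try:
            html = scrapers[scraper].get_html(url)
            if html:
                return html  # first scraper to succeed wins
        except KeyboardInterrupt:
            raise
        except Exception:
            pass  # fall through to the next scraper
    return ''
```

One ordering consequence worth noting: an unconfigured install now tries the headless browser first and only falls back to the simple scraper when it fails, which matches the new `SCRAPERS` default in settings.py.example.
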
diff --git a/readerserver/main.js b/readerserver/main.js
index f0fe218..b318f53 100644
--- a/readerserver/main.js
+++ b/readerserver/main.js
@@ -2,12 +2,18 @@ const port = 33843;
 const express = require('express');
 const app = express();
 const simple = require('./scraper/simple');
-const browser = require('./scraper/browser');
+const headless = require('./scraper/headless');
 
 app.use(express.urlencoded({ extended: true }));
 
 app.get('/', (req, res) => {
-  const routes = ['/', '/details', '/browser', '/browser/details', '/browser/comments'];
+  const routes = [
+    '/simple',
+    '/simple/details',
+    '/headless',
+    '/headless/details',
+    '/headless/comments'
+  ];
   const html = routes.map(route => `
   `).join('
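The diff ends mid-way through main.js's index template (the per-route markup inside the template literal was lost in extraction and is left as-is above), but the renamed routes themselves are clear. A hedged sketch of how the apiserver side consumes them, assuming the reader server accepts a form-encoded `url` field (consistent with the `express.urlencoded` middleware above) and responds with JSON; `fetch_details` is a hypothetical helper, and the 20/60-second timeouts mirror the `TIMEOUT` constants in simple.py and headless.py:

```python
import requests

BASE = 'http://127.0.0.1:33843'  # port from readerserver/main.js

def fetch_details(url, scraper='simple', timeout=20):
    # Hypothetical helper: POST the page URL to the renamed
    # /simple/details or /headless/details endpoint and return
    # the parsed JSON details, or None on failure.
    try:
        r = requests.post(f'{BASE}/{scraper}/details',
                          data={'url': url}, timeout=timeout)
        r.raise_for_status()
        return r.json()
    except requests.RequestException:
        return None

# Try the lightweight scraper first, then the headless browser.
details = (fetch_details('https://example.com/article')
           or fetch_details('https://example.com/article',
                            scraper='headless', timeout=60))
```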