__license__ = 'GPL 3'
__copyright__ = '2012, Ruben Pollan <meskio@sindominio.net>; 2020, ibu radempa <ibu@radempa.de>'
__docformat__ = 'restructuredtext en'

import json

try:
    from urllib.parse import quote_plus as quote
except ImportError:
    # Python 2 fallback: quote_plus (matching the branch above) lives in
    # urllib, not urllib2.
    from urllib import quote_plus as quote

try:
    from PyQt5.Qt import QUrl
except ImportError:
    from PyQt4.Qt import QUrl

from contextlib import closing

from calibre import browser, url_slash_cleaner
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog
from . import TheAnarchistLibraryStore

url1 = 'https://theanarchistlibrary.org/search?fmt=json&page=%s&query=%s'
url2 = 'https://usa.anarchistlibraries.net/search?fmt=json&page=%s&query=%s'
"""Search URLs. If the library has no fallback URL, set url2 = None."""

max_pages = 10
"""Page limit (amusewiki returns 10 results per page)."""

user_agent = 'Calibre plugin calibre-tal v' + '{}.{}.{}'.format(*TheAnarchistLibraryStore.version)
"""User agent for search requests; the version tuple is taken from
TheAnarchistLibraryStore as defined in this package's __init__.py."""
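# For example, a hypothetical version tuple (1, 0, 0) in __init__.py would
# yield the user agent string 'Calibre plugin calibre-tal v1.0.0'.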


class TheAnarchistLibraryStore(BasicStoreConfig, StorePlugin):

    def open(self, parent=None, detail_item=None, external=False):
        # Open the given detail page (or the store front page) either in the
        # external browser or in calibre's embedded store dialog.
        url = 'https://theanarchistlibrary.org/'

        if external or self.config.get('open_external', False):
            open_url(QUrl(url_slash_cleaner(detail_item if detail_item else url)))
        else:
            d = WebStoreDialog(self.gui, url, parent, detail_item)
            d.setWindowTitle(self.name)
            d.set_tags(self.config.get('tags', ''))
            d.exec_()

    def search(self, query, max_results=10, timeout=10):
        # Fetch results page by page from the primary library; if a page
        # fails, retry that page against the mirror (url2), when configured.
        br = browser(user_agent=user_agent)
        counter = max_results
        page = 0
        while counter > 0 and page < max_pages:
            page += 1
            try:
                for result in self._iter_search_results(br, url1, page, query, timeout):
                    if result is False or counter <= 0:
                        return
                    counter -= 1
                    yield result
            except Exception:
                # The primary site failed for this page; fall back to the
                # mirror if one is configured.
                if url2:
                    for result in self._iter_search_results(br, url2, page, query, timeout):
                        if result is False or counter <= 0:
                            return
                        counter -= 1
                        yield result

    def _iter_search_results(self, br, url, page, query, timeout):
        # Fetch one page of JSON results and yield a SearchResult per item;
        # yield False when the page is empty, i.e. there are no more results.
        with closing(br.open(url % (page, quote(query)), timeout=timeout)) as f:
            doc = json.load(f)
            if not doc:
                yield False
                return
            for data in doc:
                detail_url = data['url'].strip()
                s = SearchResult()
                s.title = data['title'].strip()
                s.author = data['author'].strip()
                s.price = '$0.00'
                s.detail_item = detail_url
                s.drm = SearchResult.DRM_UNLOCKED
                # Download links for the renderings amusewiki serves
                # alongside each text's canonical URL.
                s.downloads['EPUB'] = detail_url + '.epub'
                s.downloads['PDF'] = detail_url + '.pdf'
                s.downloads['A4.PDF'] = detail_url + '.a4.pdf'
                s.downloads['LT.PDF'] = detail_url + '.lt.pdf'
                s.formats = 'EPUB, PDF, A4.PDF, LT.PDF'
                yield s
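

# Minimal debugging sketch (kept commented out; calibre never runs this). It
# illustrates, under assumptions, how the search pipeline could be exercised
# from the command line inside calibre's environment (e.g. via calibre-debug).
# The StorePlugin constructor arguments used here are an assumption, not taken
# from this plugin's code.
#
# if __name__ == '__main__':
#     import sys
#     store = TheAnarchistLibraryStore(None, 'The Anarchist Library')
#     for r in store.search(' '.join(sys.argv[1:]) or 'mutual aid', max_results=5):
#         print(r.title, '-', r.author, r.detail_item)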