Cleanup and remove nodejs dep

Michel Roux 2020-04-06 15:46:45 +02:00
parent 289f938eb6
commit 1bf903b1f9
3 changed files with 8 additions and 17 deletions

View File

@@ -4,9 +4,9 @@ ENV DEBIAN_FRONTEND noninteractive
 ENV LANG C.UTF-8
 RUN apt-get update && apt-get -y upgrade && \
-    apt-get -y install python3 python3-pip locales nodejs \
+    apt-get -y install python3 python3-pip locales \
     python3-flask python3-flask-sqlalchemy python3-flask-httpauth python3-flaskext.wtf \
-    python3-pymysql python3-requests python3-bs4 python3-dotenv && \
-    pip3 install cfscrape && \
+    python3-pymysql python3-requests python3-requests-toolbelt python3-bs4 python3-dotenv && \
+    pip3 install cloudscraper && \
     printf "en_US.UTF-8 UTF-8\nfr_FR.UTF-8 UTF-8\n" > /etc/locale.gen && \
     locale-gen && rm -rf /var/lib/apt/lists/*
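
The nodejs package was only pulled in for cfscrape, which relies on an external JavaScript runtime to solve Cloudflare's challenge pages; cloudscraper handles the challenge in pure Python, so the runtime can be dropped. A minimal sketch of the drop-in usage, with the target URL taken from the diff below purely as an illustration:

    # Sketch only: cloudscraper mirrors the requests API, so existing
    # scraper.get()/post() calls keep working without a Node.js runtime.
    from cloudscraper import create_scraper

    scraper = create_scraper()  # returns a requests.Session subclass
    html = scraper.get('https://www2.yggtorrent.se').text  # example URL from the diff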

View File

@@ -8,11 +8,13 @@ from logging import getLogger
 from urllib.parse import quote
 from bs4 import BeautifulSoup
-from cfscrape import create_scraper
+from cloudscraper import create_scraper
 from config import IS_DEBUG, CACHE_TIMEOUT, BLACKLIST_WORDS
 from models import AnimeLink
+
+scraper = create_scraper()
 
 class ConnectorReturn(Enum):
     SEARCH = 1
@@ -102,11 +104,6 @@ class Connector(ABC):
     def is_light(self):
         pass
 
-    @property
-    @abstractmethod
-    def is_behind_cloudflare(self):
-        pass
-
     def __init__(self, query, page=1, return_type=ConnectorReturn.SEARCH):
         self.query = query
         self.data = []
@@ -136,8 +133,6 @@ class Connector(ABC):
         return self
 
     def curl_content(self, url, params=None, ajax=False):
-        scraper = create_scraper()
-
         if ajax:
             headers = {'X-Requested-With': 'XMLHttpRequest'}
         else:
@@ -193,7 +188,6 @@ class Nyaa(Connector):
     favicon = 'nyaa.png'
     base_url = 'https://nyaa.si'
     is_light = False
-    is_behind_cloudflare = False
 
     def get_full_search_url(self):
         sort_type = 'size'
@@ -266,7 +260,6 @@ class Pantsu(Connector):
     favicon = 'pantsu.png'
     base_url = 'https://nyaa.net'
     is_light = False
-    is_behind_cloudflare = False
 
     def get_full_search_url(self):
         sort_type = 4
@@ -349,7 +342,6 @@ class YggTorrent(Connector):
     favicon = 'yggtorrent.png'
     base_url = 'https://www2.yggtorrent.se'
     is_light = False
-    is_behind_cloudflare = True
     category = 2179
 
     def get_full_search_url(self):
@@ -425,7 +417,6 @@ class AnimeUltime(Connector):
     favicon = 'animeultime.png'
     base_url = 'http://www.anime-ultime.net'
     is_light = True
-    is_behind_cloudflare = False
 
     def get_full_search_url(self):
         from_date = ''
@@ -530,7 +521,6 @@ class Other(Connector):
     favicon = 'blank.png'
     base_url = ''
     is_light = True
-    is_behind_cloudflare = False
 
     def get_full_search_url(self):
         pass
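
With the per-call create_scraper() removed, curl_content reuses the single module-level scraper session, so Cloudflare clearance cookies persist between requests instead of the challenge being re-solved on every call. A rough sketch of the resulting shape; only the shared scraper and the ajax header come from this diff, while the GET/POST split, timeout value and return shape are assumptions for illustration:

    # Sketch of curl_content after the change; everything except the shared
    # module-level `scraper` and the ajax header is assumed for illustration.
    def curl_content(self, url, params=None, ajax=False):
        headers = {'X-Requested-With': 'XMLHttpRequest'} if ajax else {}
        if params is not None:
            response = scraper.post(url, params, headers=headers, timeout=10)  # assumed POST branch
        else:
            response = scraper.get(url, headers=headers, timeout=10)  # assumed timeout
        return {'http_code': response.status_code, 'output': response.text}  # assumed return shape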

View File

@@ -8,4 +8,5 @@ requests==2.21.0
 beautifulsoup4==4.7.1
 python-dotenv==0.9.1
 Werkzeug==0.14.1
-cfscrape
+requests-toolbelt
+cloudscraper