Refactor again

Michel Roux 2019-11-30 18:09:30 +01:00
parent 79f847316c
commit 682aa72031
4 changed files with 165 additions and 161 deletions

app.py

@@ -52,5 +52,6 @@ def admin():
 if __name__ == '__main__':
-    app_debug = environ.get('FLASK_ENV', 'production') is 'development'
-    app.run('0.0.0.0', debug=app_debug)
+    app_debug = environ.get('FLASK_ENV', 'production') == 'development'
+    app_port = environ.get('FLASK_PORT', 5000)
+    app.run('0.0.0.0', app_port, app_debug)
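Review note: `is` tests object identity, not string equality, so the old check only matched when CPython happened to intern both strings; `==` is the correct comparison. A quick illustration of the pitfall (illustrative only, not part of the commit):

    >>> flask_env = ''.join(['develop', 'ment'])
    >>> flask_env == 'development'
    True
    >>> flask_env is 'development'
    False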

@@ -23,7 +23,7 @@ class Connector(ABC):
         self.category = category
         self.data = []
         self.is_more = False
-        self.on_error = False
+        self.on_error = True
         self.page = page
         self.return_type = return_type
@@ -32,19 +32,18 @@ class Connector(ABC):
         pass

     @abstractmethod
-    def __search(self):
+    def search(self):
         pass

     @abstractmethod
-    def __get_history(self):
+    def get_history(self):
         pass

     def run(self):
-        if not len(self.data):
+        if self.on_error:
             if self.return_type is ConnectorReturn.SEARCH:
-                self.__search()
+                self.search()
             elif self.return_type is ConnectorReturn.HISTORY:
-                self.__get_history()
+                self.get_history()
         return self

     def curl_content(self, url, params=None, ajax=False):
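Review note: this commit inverts the error flag. Connectors now start with `on_error = True`, `run()` only does work while the flag is set, and each connector clears it after a successful fetch, so `run()` becomes safely re-callable as a retry. A rough sketch of the intended call pattern (hypothetical; constructor arguments elided):

    connector = Nyaa(...)         # any Connector subclass
    for attempt in range(3):      # retry while the last fetch failed
        connector.run()           # becomes a no-op once on_error is False
        if not connector.on_error:
            break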
@@ -116,58 +115,59 @@ class Nyaa(Connector):
         to_query = '(%s vf)|(%s vostfr)|(%s multi)|(%s french)' % (self.query, self.query, self.query, self.query)
         return '%s/?f=0&c=1_3&s=%s&o=desc&q=%s&p=%s' % (self.base_url, sort_type, to_query, self.page)

-    def __get_history(self):
-        self.__search()
+    def get_history(self):
+        self.search()

-    def __search(self):
-        response = self.curl_content(self.get_full_search_url())
+    def search(self):
+        if self.on_error:
+            response = self.curl_content(self.get_full_search_url())

-        if response['http_code'] is 200:
-            html = BeautifulSoup(response['output'], 'html.parser')
-            trs = html.select('table.torrent-list tr')
-            valid_trs = 0
+            if response['http_code'] is 200:
+                html = BeautifulSoup(response['output'], 'html.parser')
+                trs = html.select('table.torrent-list tr')
+                valid_trs = 0

-            for i, tr in enumerate(trs):
-                if not i:
-                    continue
+                for i, tr in enumerate(trs):
+                    if not i:
+                        continue

-                tds = tr.findAll('td')
-                check_downloads = int(tds[7].string)
-                check_seeds = int(tds[5].string)
+                    tds = tr.findAll('td')
+                    check_downloads = int(tds[7].string)
+                    check_seeds = int(tds[5].string)

-                if check_downloads or check_seeds:
-                    urls = tds[1].findAll('a')
+                    if check_downloads or check_seeds:
+                        urls = tds[1].findAll('a')

-                    if len(urls) > 1:
-                        url = urls[1]
-                        has_comment = True
-                    else:
-                        url = urls[0]
-                        has_comment = False
+                        if len(urls) > 1:
+                            url = urls[1]
+                            has_comment = True
+                        else:
+                            url = urls[0]
+                            has_comment = False

-                    if any(url.string in word for word in self.blacklist_words):
-                        continue
+                        if any(url.string in word for word in self.blacklist_words):
+                            continue

-                    valid_trs = valid_trs + 1
-                    self.data.append({
-                        'lang': self.get_lang(url.string),
-                        'href': '%s%s' % (self.base_url, url['href']),
-                        'name': self.boldify(url.string),
-                        'comment': str(urls[0]).replace('/view/',
-                                                        '%s%s' % (self.base_url, '/view/')) if has_comment else '',
-                        'link': tds[2].decode_contents().replace('/download/', '%s%s' % (self.base_url, '/download/')),
-                        'size': tds[3].string,
-                        'date': '%s:00' % tds[4].string,
-                        'seeds': check_seeds,
-                        'leechs': tds[6].string,
-                        'downloads': check_downloads,
-                        'class': 'is-%s' % tr['class'][0]
-                    })
+                        valid_trs = valid_trs + 1
+                        self.data.append({
+                            'lang': self.get_lang(url.string),
+                            'href': '%s%s' % (self.base_url, url['href']),
+                            'name': self.boldify(url.string),
+                            'comment': str(urls[0]).replace('/view/',
+                                                            '%s%s' % (self.base_url, '/view/')) if has_comment else '',
+                            'link': tds[2].decode_contents().replace('/download/',
+                                                                     '%s%s' % (self.base_url, '/download/')),
+                            'size': tds[3].string,
+                            'date': '%s:00' % tds[4].string,
+                            'seeds': check_seeds,
+                            'leechs': tds[6].string,
+                            'downloads': check_downloads,
+                            'class': 'is-%s' % tr['class'][0]
+                        })

-            self.is_more = valid_trs is not len(trs)
-        else:
-            self.on_error = True
+                self.on_error = False
+                self.is_more = valid_trs is not len(trs)
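Review note: the comparisons `response['http_code'] is 200` and `valid_trs is not len(trs)` survive this refactor but rely on CPython's small-integer cache; `==` and `!=` are the reliable spellings. A quick illustration (CPython, not part of the commit):

    >>> int('200') is 200      # works only because small ints are cached
    True
    >>> int('1000') is 1000    # outside the [-5, 256] cache: identity fails
    False
    >>> int('1000') == 1000
    True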
@@ -184,54 +184,54 @@ class Pantsu(Connector):
         to_query = '(%s vf)|(%s vostfr)|(%s multi)|(%s french)' % (self.query, self.query, self.query, self.query)
         return '%s/search/%s?c=3_13&order=false&q=%s&sort=%s' % (self.base_url, self.page, to_query, sort_type)

-    def __get_history(self):
-        self.__search()
+    def get_history(self):
+        self.search()

-    def __search(self):
-        response = self.curl_content(self.get_full_search_url())
+    def search(self):
+        if self.on_error:
+            response = self.curl_content(self.get_full_search_url())

-        if response['http_code'] is 200:
-            html = BeautifulSoup(response['output'], 'html.parser')
-            trs = html.select('div.results tr')
-            valid_trs = 0
+            if response['http_code'] is 200:
+                html = BeautifulSoup(response['output'], 'html.parser')
+                trs = html.select('div.results tr')
+                valid_trs = 0

-            for i, tr in enumerate(trs):
-                if not i:
-                    continue
+                for i, tr in enumerate(trs):
+                    if not i:
+                        continue

-                tds = tr.findAll('td')
-                check_downloads = int(tds[6].string.replace('-', '0'))
-                check_seeds = int(tds[4].string.replace('-', '0'))
+                    tds = tr.findAll('td')
+                    check_downloads = int(tds[6].string.replace('-', '0'))
+                    check_seeds = int(tds[4].string.replace('-', '0'))

-                if check_downloads or check_seeds:
-                    url = tds[1].a
+                    if check_downloads or check_seeds:
+                        url = tds[1].a

-                    if any(url.string in word for word in self.blacklist_words):
-                        continue
+                        if any(url.string in word for word in self.blacklist_words):
+                            continue

-                    valid_trs = valid_trs + 1
-                    self.data.append({
-                        'lang': self.get_lang(url.string),
-                        'href': '%s%s' % (self.base_url, url['href']),
-                        'name': self.boldify(url.string),
-                        'comment': '',
-                        'link': tds[2].decode_contents()
-                        .replace('icon-magnet', 'fa fa-fw fa-magnet')
-                        .replace('icon-floppy', 'fa fa-fw fa-download'),
-                        'size': tds[3].string,
-                        'date': datetime
-                        .strptime(tds[7]['title'], '%m/%d/%Y, %I:%M:%S %p %Z+0')
-                        .strftime('%Y-%m-%d %H:%M:%S'),
-                        'seeds': check_seeds,
-                        'leechs': tds[5].string,
-                        'downloads': check_downloads,
-                        'class': 'is-%s' % tr['class'][0]
-                    })
+                        valid_trs = valid_trs + 1
+                        self.data.append({
+                            'lang': self.get_lang(url.string),
+                            'href': '%s%s' % (self.base_url, url['href']),
+                            'name': self.boldify(url.string),
+                            'comment': '',
+                            'link': tds[2].decode_contents()
+                            .replace('icon-magnet', 'fa fa-fw fa-magnet')
+                            .replace('icon-floppy', 'fa fa-fw fa-download'),
+                            'size': tds[3].string,
+                            'date': datetime
+                            .strptime(tds[7]['title'], '%m/%d/%Y, %I:%M:%S %p %Z+0')
+                            .strftime('%Y-%m-%d %H:%M:%S'),
+                            'seeds': check_seeds,
+                            'leechs': tds[5].string,
+                            'downloads': check_downloads,
+                            'class': 'is-%s' % tr['class'][0]
+                        })

-            self.is_more = valid_trs is not len(trs)
-        else:
-            self.on_error = True
+                self.on_error = False
+                self.is_more = valid_trs is not len(trs)
@@ -249,13 +249,11 @@ class YggTorrent(Connector):
             self.base_url, sort_type, self.category, self.query, self.page
         )

-    def __get_history(self):
-        self.__search()
+    def get_history(self):
+        self.search()

-    def __search(self):
-        if self.category is None:
-            self.on_error = True
-        else:
+    def search(self):
+        if self.category and self.on_error:
             response = self.curl_content(self.get_full_search_url())

             if response['http_code'] is 200:
@@ -285,8 +283,9 @@ class YggTorrent(Connector):
                         'name': self.boldify(url.string),
                         'comment': '<a href="%s#comm" target="_blank"><i class="fa fa-comments-o"></i>%s</a>' %
                                    (url['href'], tds[3].string),
-                        'link': '<a href="%s/engine/download_torrent?id=%s"><i class="fa fa-fw fa-download"></i></a>' %
-                                (self.base_url, re.search(r'/(\d+)', url['href']).group(1)),
+                        'link': '<a href="%s/engine/download_torrent?id=%s">'
+                                '<i class="fa fa-fw fa-download"></i>'
+                                '</a>' % (self.base_url, re.search(r'/(\d+)', url['href']).group(1)),
                         'size': tds[5].string,
                         'date': datetime.fromtimestamp(int(tds[4].div.string)).strftime('%Y-%m-%d %H:%M:%S'),
                         'seeds': check_seeds,
@@ -295,9 +294,8 @@ class YggTorrent(Connector):
                         'class': ''
                     })

+                self.on_error = False
                 self.is_more = valid_trs is not len(trs)
-            else:
-                self.on_error = True
@@ -317,75 +315,79 @@ class AnimeUltime(Connector):
         return '%s/%s-0-1/%s' % (self.base_url, sort_type, from_date)

-    def __search(self):
-        response = self.curl_content(self.get_full_search_url(), {'search': self.query})
+    def search(self):
+        if self.on_error:
+            response = self.curl_content(self.get_full_search_url(), {'search': self.query})

-        if response['http_code'] is 200:
-            html = BeautifulSoup(response['output'], 'html.parser')
-            title = html.select('div.title')
+            if response['http_code'] is 200:
+                html = BeautifulSoup(response['output'], 'html.parser')
+                title = html.select('div.title')

-            if 'Recherche' in title[0].string:
-                trs = html.select('table.jtable tr')
+                if 'Recherche' in title[0].string:
+                    trs = html.select('table.jtable tr')

-                for i, tr in enumerate(trs):
-                    if not i:
-                        continue
+                    for i, tr in enumerate(trs):
+                        if not i:
+                            continue

-                    tds = tr.findAll('td')
+                        tds = tr.findAll('td')

-                    if len(tds) < 2:
-                        continue
+                        if len(tds) < 2:
+                            continue

-                    url = tds[0].a
+                        url = tds[0].a

-                    self.data.append({
-                        'lang': 'jp',
-                        'href': '%s/%s' % (self.base_url, url['href']),
-                        'name': url.decode_contents(),
-                        'type': tds[1].string
-                    })
-            else:
-                player = html.select('div.AUVideoPlayer')
-                name = html.select('h1')
-                ani_type = html.select('div.titre')
+                        self.data.append({
+                            'lang': 'jp',
+                            'href': '%s/%s' % (self.base_url, url['href']),
+                            'name': url.decode_contents(),
+                            'type': tds[1].string
+                        })
+                else:
+                    player = html.select('div.AUVideoPlayer')
+                    name = html.select('h1')
+                    ani_type = html.select('div.titre')

-                self.data.append({
-                    'lang': 'jp',
-                    'href': '%s%s' % (self.get_file_url(), player[0]['data-serie']),
-                    'name': self.boldify(name[0].string),
-                    'type': ani_type[0].string.replace(':', '')
-                })
-        else:
-            self.on_error = True
+                    self.data.append({
+                        'lang': 'jp',
+                        'href': '%s/file-0-1/%s' % (self.base_url, player[0]['data-serie']),
+                        'name': self.boldify(name[0].string),
+                        'type': ani_type[0].string.replace(':', '')
+                    })

+                self.on_error = False

-    def __get_history(self):
-        response = self.curl_content(self.get_full_search_url())
+    def get_history(self):
+        if self.on_error:
+            response = self.curl_content(self.get_full_search_url())

-        if response['http_code'] is 200:
-            html = BeautifulSoup(response['output'], 'html.parser')
-            tables = html.select('table.jtable')
-            h3s = html.findAll('h3')
+            if response['http_code'] is 200:
+                html = BeautifulSoup(response['output'], 'html.parser')
+                tables = html.select('table.jtable')
+                h3s = html.findAll('h3')

-            for i, table in enumerate(tables):
-                for j, tr in enumerate(table.findAll('tr')):
-                    if not j:
-                        continue
+                for i, table in enumerate(tables):
+                    for j, tr in enumerate(table.findAll('tr')):
+                        if not j:
+                            continue

-                    tds = tr.findAll('td')
-                    link = tds[0].a
+                        tds = tr.findAll('td')
+                        link = tds[0].a

-                    current_locale = locale.getlocale()
-                    locale.setlocale(locale.LC_ALL, ('fr_FR', 'UTF-8'))
-                    release_date = datetime.strptime(h3s[i].string, '%A %d %B %Y : ').strftime('%Y-%m-%d %H:%M:%S')
-                    locale.setlocale(locale.LC_ALL, current_locale)
+                        current_locale = locale.getlocale()
+                        locale.setlocale(locale.LC_ALL, ('fr_FR', 'UTF-8'))
+                        release_date = datetime.strptime(h3s[i].string, '%A %d %B %Y : ').strftime('%Y-%m-%d %H:%M:%S')
+                        locale.setlocale(locale.LC_ALL, current_locale)

-                    self.data.append({
-                        'lang': 'jp',
-                        'href': '%s%s' % (self.get_full_search_url('file'), link['href']),
-                        'name': link.string,
-                        'type': tds[4].string,
-                        'date': release_date
-                    })
+                        self.data.append({
+                            'lang': 'jp',
+                            'href': '%s/%s' % (self.base_url, link['href']),
+                            'name': link.string,
+                            'type': tds[4].string,
+                            'date': release_date
+                        })
+
+                self.on_error = False
@@ -396,8 +398,8 @@ class Other(Connector):
     def get_full_search_url(self):
         pass

-    def __search(self):
+    def search(self):
         pass

-    def __get_history(self):
+    def get_history(self):
         pass

@@ -5,11 +5,12 @@ services:
     build: .
     ports:
       - "5000:5000"
-    entrypoint: flask run -h 0.0.0.0
+    entrypoint: python3 app.py
     working_dir: /app
     environment:
       FLASK_APP: app.py
       FLASK_ENV: development
+      FLASK_PORT: 5000
       MYSQL_USER: root
       MYSQL_PASSWORD: root
       MYSQL_DATABASE: www
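Review note: `FLASK_PORT` is a project-specific variable, not one the flask CLI reads (that would be `FLASK_RUN_PORT`); it only takes effect because the entrypoint now launches `python3 app.py` and app.py reads it directly. A minimal standalone sketch of that boot path (illustrative, assuming the compose environment above):

    from os import environ
    from flask import Flask

    app = Flask(__name__)

    if __name__ == '__main__':
        # FLASK_ENV=development and FLASK_PORT=5000 come from docker-compose
        app_debug = environ.get('FLASK_ENV', 'production') == 'development'
        app_port = int(environ.get('FLASK_PORT', 5000))  # env values arrive as strings
        app.run('0.0.0.0', app_port, app_debug)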

@@ -27,7 +27,7 @@
             {% for connector in connectors %}
                 {% for torrent in connector.data %}
-                {% enfor %}
+                {% endfor %}
             {% endfor %}
         </tbody>