From a7f6854632df6cab12054af8497c812e81656bd7 Mon Sep 17 00:00:00 2001
From: Michel Roux
Date: Sat, 23 Oct 2021 19:15:42 +0000
Subject: [PATCH] Remove Pantsu (dead)

---
 pynyaata/connectors/__init__.py |  4 --
 pynyaata/connectors/pantsu.py   | 85 ---------------------------------
 2 files changed, 89 deletions(-)
 delete mode 100644 pynyaata/connectors/pantsu.py

diff --git a/pynyaata/connectors/__init__.py b/pynyaata/connectors/__init__.py
index b76a4d3..71caefe 100644
--- a/pynyaata/connectors/__init__.py
+++ b/pynyaata/connectors/__init__.py
@@ -3,14 +3,12 @@ from asyncio import gather
 from .animeultime import AnimeUltime
 from .core import Other
 from .nyaa import Nyaa
-from .pantsu import Pantsu
 from .yggtorrent import YggTorrent, YggAnimation
 from ..config import CLOUDPROXY_ENDPOINT
 
 
 async def run_all(*args, **kwargs):
     coroutines = [Nyaa(*args, **kwargs).run(),
-                  Pantsu(*args, **kwargs).run(),
                   AnimeUltime(*args, **kwargs).run()]
 
     if CLOUDPROXY_ENDPOINT:
@@ -23,8 +21,6 @@ async def run_all(*args, **kwargs):
 def get_instance(url, query=''):
     if 'nyaa.si' in url:
         return Nyaa(query)
-    elif 'nyaa.net' in url:
-        return Pantsu(query)
     elif 'anime-ultime' in url:
         return AnimeUltime(query)
     elif 'ygg' in url:
diff --git a/pynyaata/connectors/pantsu.py b/pynyaata/connectors/pantsu.py
deleted file mode 100644
index a34c3ca..0000000
--- a/pynyaata/connectors/pantsu.py
+++ /dev/null
@@ -1,85 +0,0 @@
-from bs4 import BeautifulSoup
-
-from .core import ConnectorCore, ConnectorReturn, ConnectorCache, curl_content
-from ..utils import parse_date, link_exist_in_db, check_blacklist_words, check_if_vf
-
-
-class Pantsu(ConnectorCore):
-    color = 'is-info'
-    title = 'Pantsu'
-    favicon = 'pantsu.png'
-    base_url = 'https://nyaa.net'
-    is_light = False
-    is_behind_cloudflare = False
-
-    def get_full_search_url(self):
-        sort_type = 4
-        if self.return_type is ConnectorReturn.HISTORY:
-            sort_type = 2
-
-        to_query = '(%s vf)|(%s vostfr)|(%s multi)|(%s french)' % (
-            self.query,
-            self.query,
-            self.query,
-            self.query
-        )
-        return '%s/search/%s?c=3_13&order=false&q=%s&sort=%s' % (self.base_url, self.page, to_query, sort_type)
-
-    def get_history(self):
-        self.search()
-
-    @ConnectorCache.cache_data
-    def search(self):
-        response = curl_content(self.get_full_search_url())
-
-        if response['http_code'] == 200:
-            html = BeautifulSoup(response['output'], 'html.parser')
-            trs = html.select('div.results tr')
-            valid_trs = 0
-
-            for i, tr in enumerate(trs):
-                if not i:
-                    continue
-
-                tds = tr.findAll('td')
-                check_downloads = int(tds[6].get_text().replace('-', '0'))
-                check_seeds = int(tds[4].get_text().replace('-', '0'))
-
-                if check_downloads or check_seeds:
-                    url = tds[1].a
-                    url_safe = url.get_text()
-
-                    if check_blacklist_words(url_safe):
-                        continue
-
-                    valid_trs = valid_trs + 1
-                    href = self.base_url + url['href']
-
-                    self.data.append({
-                        'vf': check_if_vf(url_safe),
-                        'href': href,
-                        'name': url_safe,
-                        'comment': '',
-                        'link': tds[2].decode_contents().replace('icon-magnet', 'fa fa-fw fa-magnet').replace(
-                            'icon-floppy', 'fa fa-fw fa-download'),
-                        'size': tds[3].get_text(),
-                        'date': parse_date(tds[7]['title'][:-6], '%m/%d/%Y, %I:%M:%S %p'),
-                        'seeds': check_seeds,
-                        'leechs': tds[5].get_text(),
-                        'downloads': check_downloads,
-                        'class': self.color if link_exist_in_db(href) else 'is-%s' % tr['class'][0]
-                    })
-
-            self.on_error = False
-            self.is_more = valid_trs and valid_trs != len(trs) - 1
-
-    @ConnectorCache.cache_data
-    def is_vf(self, url):
-        response = curl_content(url)
-
-        if response['http_code'] == 200:
-            html = BeautifulSoup(response['output'], 'html.parser')
-            title = html.select('h1.torrent-hr')
-            return check_if_vf(title[0].get_text())
-
-        return False