Remove ygg

Michel Roux 2023-10-01 10:23:35 +02:00
parent 8dcbb10d80
commit 1ac0a3a9a0
8 changed files with 6 additions and 192 deletions

View File

@@ -27,7 +27,7 @@ After a good rewrite in Python, it's time to show it to the public, and here it
 ## Features
-* Search on [Nyaa.si](https://nyaa.si/), [YggTorrent](https://duckduckgo.com/?q=yggtorrent) and [Anime-Ultime](http://www.anime-ultime.net/index-0-1)
+* Search on [Nyaa.si](https://nyaa.si/) and [Anime-Ultime](http://www.anime-ultime.net/index-0-1)
 * Provide useful links to [TheTVDB](https://www.thetvdb.com/) and [Nautiljon](https://www.nautiljon.com/) during a search
 * Color official and bad links
 * Add seeded links to a database
@@ -40,15 +40,6 @@ All is managed by environment variables.
 Please look at the `.env.dist` file for a list of all possible environment variables.
 You need a running database server to access the admin panel.
-### Bypassing CloudFlare for YggTorrent
-YggTorrent uses CloudFlare to protect itself against DDoS attacks.
-This app makes abusive requests to their servers, and CloudFlare will try to detect whether PyNyaaTa is a real human or not. *I think you have the answer to that question ...*
-Over time, CloudFlare will systematically ask you to prove you are human.
-To see YggTorrent results, you need a running FlareSolverr instance.
-Please refer to their [documentation](https://github.com/FlareSolverr/FlareSolverr#installation).
-After that, set the `CLOUDPROXY_ENDPOINT` environment variable to point to your FlareSolverr instance.
 ## Links
 - Project homepage: https://nyaa.crystalyx.net/
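
Note: the FlareSolverr bypass described in the removed section above hinged on a single optional setting. As a minimal sketch (not part of this commit), this is how such an environment-driven option is read, following the `environ.get` pattern in the config hunk further down; the example endpoint is an assumption pointing at a local FlareSolverr instance.

    # Sketch only: mirrors the environ.get pattern used in the config module below.
    # The endpoint URL is an assumed placeholder for a local FlareSolverr instance.
    from os import environ

    CLOUDPROXY_ENDPOINT = environ.get('CLOUDPROXY_ENDPOINT')  # e.g. 'http://localhost:8191/v1'

    if not CLOUDPROXY_ENDPOINT:
        print('No FlareSolverr endpoint configured; YggTorrent pages stay behind CloudFlare')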

View File

@@ -4,7 +4,7 @@ from pynyaata.models import AnimeLink
 links = AnimeLink.query.all()
 for link in links:
-    html = curl_content(link.link, debug=False, cloudflare=True)
+    html = curl_content(link.link, debug=False)
     if html['http_code'] != 200 and html['http_code'] != 500:
         print('(%d) %s %s : %s' % (

View File

@@ -109,7 +109,7 @@ def latest(page=1):
 @app.route('/list')
 @app.route('/list/<url_filters>')
 @db_required
-def list_animes(url_filters='nyaa,yggtorrent'):
+def list_animes(url_filters='nyaa'):
     filters = None
     for i, to_filter in enumerate(url_filters.split(',')):
         if not i:

View File

@@ -17,7 +17,6 @@ APP_PORT = int(environ.get('FLASK_PORT', 5000))
 CACHE_TIMEOUT = int(environ.get('CACHE_TIMEOUT', 60 * 60))
 REQUESTS_TIMEOUT = int(environ.get('REQUESTS_TIMEOUT', 5))
 BLACKLIST_WORDS = environ.get('BLACKLIST_WORDS', '').split(',') if environ.get('BLACKLIST_WORDS', '') else []
-CLOUDPROXY_ENDPOINT = environ.get('CLOUDPROXY_ENDPOINT')
 DB_ENABLED = False
 REDIS_ENABLED = False
 TRANSMISSION_ENABLED = False

View File

@@ -3,14 +3,11 @@ from asyncio import gather
 from .animeultime import AnimeUltime
 from .core import Other
 from .nyaa import Nyaa
-from .yggtorrent import YggAnimation, YggTorrent
 async def run_all(*args, **kwargs):
     coroutines = [Nyaa(*args, **kwargs).run(),
-                  AnimeUltime(*args, **kwargs).run(),
-                  YggTorrent(*args, **kwargs).run(),
-                  YggAnimation(*args, **kwargs).run()]
+                  AnimeUltime(*args, **kwargs).run()]
     return list(await gather(*coroutines))
@@ -20,7 +17,5 @@ def get_instance(url, query=''):
         return Nyaa(query)
     elif 'anime-ultime' in url:
         return AnimeUltime(query)
-    elif 'ygg' in url:
-        return YggTorrent(query)
     else:
         return Other(query)
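
A minimal sketch (not from the repository) of how the trimmed-down connector helpers behave after this change; the query and URL are placeholders, and the module path is inferred from the imports shown in this diff.

    from asyncio import run

    from pynyaata.connectors import get_instance, run_all  # path inferred from this diff

    # run_all now only gathers the Nyaa and Anime-Ultime coroutines concurrently.
    results = run(run_all('one punch man'))
    print(len(results), 'connectors searched')

    # With the 'ygg' branch gone, a former YggTorrent URL falls back to the Other connector.
    connector = get_instance('https://www3.yggtorrent.wtf/engine/search', 'one punch man')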

View File

@@ -7,13 +7,10 @@ from redis.exceptions import RedisError
 from requests import RequestException, Session
 from ..config import CACHE_TIMEOUT, REDIS_ENABLED, REQUESTS_TIMEOUT, logger
-from ..flarerequests import FlareRequests
 if REDIS_ENABLED:
     from ..config import cache
-cloudproxy_session = None
 class ConnectorReturn(Enum):
     SEARCH = 1
@@ -71,11 +68,11 @@ class Cache:
 ConnectorCache = Cache()
-def curl_content(url, params=None, ajax=False, debug=True, cloudflare=False):
+def curl_content(url, params=None, ajax=False, debug=True):
     output = ''
     http_code = 500
     method = 'post' if (params is not None) else 'get'
-    request = FlareRequests() if cloudflare else Session()
+    request = Session()
     headers = {}
     if ajax:
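
After this hunk, `curl_content` always goes through a plain `requests.Session`; callers simply drop the `cloudflare` flag. A minimal usage sketch (not from the repository; the URL is a placeholder and the module path is inferred from this diff), relying only on the `output`/`http_code` keys visible above:

    from pynyaata.connectors.core import curl_content  # path inferred from this diff

    # Sketch only: the cloudflare keyword no longer exists, so a call looks like this.
    result = curl_content('https://nyaa.si/?page=rss', debug=False)

    if result['http_code'] == 200:
        print(len(result['output']), 'characters fetched')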

View File

@@ -1,98 +0,0 @@
-import re
-from datetime import datetime
-from urllib.parse import quote
-
-from bs4 import BeautifulSoup
-
-from .core import ConnectorCache, ConnectorCore, ConnectorReturn, curl_content
-from ..utils import check_blacklist_words, check_if_vf, link_exist_in_db, parse_date
-
-
-class YggTorrent(ConnectorCore):
-    color = 'is-success'
-    title = 'YggTorrent'
-    favicon = 'yggtorrent.png'
-    base_url = 'https://www3.yggtorrent.wtf'
-    is_light = False
-    category = 2179
-
-    def get_full_search_url(self):
-        sort_type = 'size'
-        if self.return_type is ConnectorReturn.HISTORY:
-            sort_type = 'publish_date'
-        sort_page = '&page=%s' % (
-            (self.page - 1) * 50
-        ) if self.page > 1 else ''
-        return '%s/engine/search?name=%s&category=2145&sub_category=%s&do=search&order=desc&sort=%s%s' % (
-            self.base_url, self.query, self.category, sort_type, sort_page
-        )
-
-    def get_history(self):
-        self.search()
-
-    @ConnectorCache.cache_data
-    def search(self):
-        if self.category:
-            response = curl_content(
-                self.get_full_search_url(), cloudflare=True
-            )
-
-            if response['http_code'] == 200:
-                html = BeautifulSoup(response['output'], 'html.parser')
-                trs = html.select('table.table tr')
-                valid_trs = 0
-
-                for i, tr in enumerate(trs):
-                    if not i:
-                        continue
-
-                    tds = tr.findAll('td')
-                    check_downloads = int(tds[6].get_text())
-                    check_seeds = int(tds[7].get_text())
-
-                    if check_downloads or check_seeds:
-                        url = tds[1].a
-                        url_safe = url.get_text()
-
-                        if check_blacklist_words(url_safe):
-                            continue
-
-                        valid_trs = valid_trs + 1
-                        self.data.append({
-                            'vf': check_if_vf(url_safe),
-                            'href': url['href'],
-                            'name': url_safe,
-                            'comment': '<a href="%s#comm" target="_blank"><i class="fa fa-comments-o"></i>%s</a>' %
-                                       (url['href'], tds[3].decode_contents()),
-                            'link': '<a href="%s/engine/download_torrent?id=%s">'
-                                    '<i class="fa fa-fw fa-download"></i>'
-                                    '</a>' % (self.base_url,
-                                              re.search(r'/(\d+)', url['href']).group(1)),
-                            'size': tds[5].get_text(),
-                            'date': parse_date(datetime.fromtimestamp(int(tds[4].div.get_text()))),
-                            'seeds': check_seeds,
-                            'leechs': tds[8].get_text(),
-                            'downloads': check_downloads,
-                            'class': self.color if link_exist_in_db(quote(url['href'], '/+:')) else ''
-                        })
-
-                self.on_error = False
-                self.is_more = valid_trs and valid_trs != len(trs) - 1
-
-    @ConnectorCache.cache_data
-    def is_vf(self, url):
-        response = curl_content(url)
-
-        if response['http_code'] == 200:
-            html = BeautifulSoup(response['output'], 'html.parser')
-            title = html.select('#title h1')
-            return check_if_vf(title[0].get_text())
-
-        return False
-
-
-class YggAnimation(YggTorrent):
-    title = 'YggAnimation'
-    category = 2178

View File

@@ -1,70 +0,0 @@
-from io import BytesIO
-from urllib import parse
-
-from charset_normalizer import detect
-from requests import RequestException, Response, Session, post
-
-from .config import CLOUDPROXY_ENDPOINT
-
-
-class FlareRequests(Session):
-    def request(self, method, url, params=None, data=None, **kwargs):
-        if not CLOUDPROXY_ENDPOINT:
-            return super().request(method, url, params, data, **kwargs)
-
-        sessions = post(CLOUDPROXY_ENDPOINT, json={"cmd": "sessions.list"}).json()
-
-        if "sessions" in sessions and len(sessions["sessions"]) > 0:
-            FLARE_SESSION = sessions["sessions"][0]
-        else:
-            response = post(CLOUDPROXY_ENDPOINT, json={"cmd": "sessions.create"})
-            session = response.json()
-
-            if "session" in session:
-                FLARE_SESSION = session["session"]
-            else:
-                raise RequestException(response)
-
-        if params:
-            url += "&" if len(url.split("?")) > 1 else "?"
-            url = f"{url}{parse.urlencode(params)}"
-
-        post_data = {
-            "cmd": f"request.{method.lower()}",
-            "session": FLARE_SESSION,
-            "url": url,
-        }
-
-        if data:
-            post_data["postData"] = parse.urlencode(data)
-
-        try:
-            response = post(
-                CLOUDPROXY_ENDPOINT,
-                json=post_data,
-            )
-
-            content = response.json()
-
-            if "solution" in content:
-                solution = content["solution"]
-                raw = solution["response"].encode()
-                encoding = detect(raw)
-
-                resolved = Response()
-                resolved.status_code = solution["status"]
-                resolved.headers = solution["headers"]
-                resolved.raw = BytesIO(raw)
-                resolved.url = url
-                resolved.encoding = encoding["encoding"]
-                resolved.reason = content["status"]
-                resolved.cookies = solution["cookies"]
-
-                return resolved
-
-            raise RequestException(response)
-        except RequestException:
-            session = post(
-                CLOUDPROXY_ENDPOINT,
-                json={"cmd": "sessions.destroy", "session": FLARE_SESSION},
-            )
-
-            raise RequestException(solution)
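
For context, the class deleted above subclassed `requests.Session` and, whenever `CLOUDPROXY_ENDPOINT` was set, rewrote each call into a FlareSolverr `request.<method>` command, rebuilding a `requests.Response` from the returned solution. A minimal usage sketch (not from the repository; the search URL is a placeholder and the module path is inferred from this diff's imports):

    from pynyaata.flarerequests import FlareRequests  # module removed by this commit

    # Sketch only: with CLOUDPROXY_ENDPOINT unset, this falls through to a normal
    # requests.Session call; with it set, the same call is proxied through FlareSolverr.
    session = FlareRequests()
    response = session.get('https://www3.yggtorrent.wtf/engine/search?name=naruto')
    print(response.status_code, response.encoding)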