# coding: utf-8
# YYeTsBot - fansub.py
# 2019/8/15 18:30

__author__ = "Benny <benny.think@gmail.com>"

import contextlib
import hashlib
import json
import logging
import os
import pickle
import re
import sys

import fakeredis
import pymongo
import redis
import requests
from bs4 import BeautifulSoup
from bson.objectid import ObjectId

from config import (
    BD2020_SEARCH,
    DISCUSS,
    FANSUB_ORDER,
    FIX_SEARCH,
    MONGO,
    NEWZMZ_RESOURCE,
    NEWZMZ_SEARCH,
    REDIS,
    WORKERS,
    XL720_SEARCH,
    ZHUIXINFAN_RESOURCE,
    ZHUIXINFAN_SEARCH,
)

logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(filename)s [%(levelname)s]: %(message)s")

session = requests.Session()
ua = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36"
session.headers.update({"User-Agent": ua})

this_module = sys.modules[__name__]


class Redis:
    def __init__(self):
        if os.getenv("DISABLE_REDIS"):
            self.r = fakeredis.FakeStrictRedis()
        else:
            self.r = redis.StrictRedis(host=REDIS, decode_responses=True)

    def __del__(self):
        self.r.close()

    @classmethod
    def preview_cache(cls, timeout=3600 * 24):
        def func(fun):
            def inner(*args, **kwargs):
                search_text = args[1]
                cache_value = cls().r.get(search_text)
                if cache_value:
                    logging.info("🎉 Preview cache hit for %s %s", fun, search_text)
                    return json.loads(cache_value)
                else:
                    logging.info("😱 Preview cache expired. Running %s %s", fun, search_text)
                    res = fun(*args, **kwargs)
                    # a result of length 1 only contains the "class" key, i.e. no search hit at all
                    if len(res) != 1:
                        json_str = json.dumps(res, ensure_ascii=False)
                        cls().r.set(search_text, json_str, ex=timeout)
                        # save the hash -> url mapping
                        res_copy = res.copy()
                        res_copy.pop("class")
                        for url_hash, value in res_copy.items():
                            cls().r.hset(url_hash, mapping=value)
                    return res

            return inner

        return func

    @classmethod
    def result_cache(cls, timeout=3600 * 24):
        def func(fun):
            def inner(*args, **kwargs):
                # this method converts a hash back to its url
                url_or_hash = args[1]
                if re.findall(r"http[s]?://", url_or_hash):
                    # this is already a url
                    url_hash = hashlib.sha1(url_or_hash.encode("u8")).hexdigest()
                    cls().r.hset(url_hash, mapping={"url": url_or_hash})
                else:
                    # this is a hash; retrieve the real url from redis
                    url_or_hash = cls().r.hget(url_or_hash, "url")
                    if not url_or_hash:
                        url_or_hash = ""
                url = url_or_hash
                del url_or_hash
                cache_value = cls().r.hgetall(url)
                if cache_value:
                    logging.info("🎉 Result cache hit for %s %s", fun, url)
                    return cache_value
                else:
                    logging.info("😱 Result cache expired. Running %s %s", fun, url)
                    new_args = (args[0], url)
                    res = fun(*new_args, **kwargs)
                    # we must have a result by now, so cache it
                    cls().r.hset(url, mapping=res)
                    cls().r.expire(url, timeout)
                    return res

            return inner

        return func
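

# A minimal usage sketch of the two decorators above (illustrative only, the
# subclass name is hypothetical):
#
#   class ExampleFansub(BaseFansub):
#       @Redis.preview_cache(timeout=600)
#       def search_preview(self, search_text: str) -> dict:
#           ...
#
# preview_cache keys the cache by search text and also stores a
# sha1(url) -> {"name", "url", "class"} redis hash for every hit; result_cache
# later uses exactly that hash to resolve a button click back to the real url.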


class BaseFansub:
    """
    All subclasses should implement three kinds of methods:
    1. online search, containing a preview for the bot and the complete result
    2. login and check (pass if not applicable)
    3. search_result, which is critical for the bot to draw the markup
    """

    cookie_file = None

    def __init__(self):
        self.redis = Redis().r

    @property
    def id(self):
        # implement how to get the unique id for this resource
        return None

    def get_html(self, link: str, encoding=None) -> str:
        # return the html text of the search page
        with session.get(link) as r:
            if encoding is not None:
                r.encoding = encoding
            html = r.text
            logging.info("[%s] [%s] Searching for %s", self.__class__.__name__, r.status_code, link)
        return html

    def search_preview(self, search_text: str) -> dict:
        # try to retrieve critical information from the html;
        # this result is returned to the bot for manual selection
        # {url_hash: {"name": name, "url": url, "class": class_name}, ..., "class": class_name}
        pass

    def search_result(self, url_or_hash: str) -> dict:
        """
        This runs when the user clicks one of the buttons; only then do we know the resource link.
        From the information above, try to build a detailed dict structure.
        This method should check the cache first if applicable.
        This method should set self.link and self.data.
        :param url_or_hash: url or hash
        :return: {"all": dict_result, "share": share_link, "cnname": cnname, "type": type}
        """
        pass

    def __login_check(self):
        pass

    def __manual_login(self):
        pass

    def __save_cookies__(self, requests_cookiejar):
        with open(self.cookie_file, "wb") as f:
            pickle.dump(requests_cookiejar, f)

    def __load_cookies__(self):
        with open(self.cookie_file, "rb") as f:
            return pickle.load(f)
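

# Shape of a search_preview result, with illustrative values (each key except
# "class" is the sha1 hex digest of the resource url):
# {
#     "1a2b...": {"name": "name1", "url": "url1", "class": "YYeTsOffline"},
#     "3c4d...": {"name": "name2", "url": "url2", "class": "YYeTsOffline"},
#     "class": "YYeTsOffline",
# }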


class YYeTsOffline(BaseFansub):
    def __init__(self, db="zimuzu", col="yyets"):
        super().__init__()
        self.mongo = pymongo.MongoClient(host=MONGO)
        self.db = self.mongo[db]
        self.collection = self.db[col]

    @Redis.preview_cache(60)
    def search_preview(self, search_text: str) -> dict:
        logging.info("[%s] Loading offline data from MongoDB...", self.__class__.__name__)
        projection = {"_id": False, "data.info": True}
        data = self.collection.find(
            {
                "$or": [
                    {"data.info.cnname": {"$regex": f".*{search_text}.*", "$options": "i"}},
                    {"data.info.enname": {"$regex": f".*{search_text}.*", "$options": "i"}},
                    {"data.info.aliasname": {"$regex": f".*{search_text}.*", "$options": "i"}},
                ]
            },
            projection,
        )
        results = {}
        for item in data:
            info = item["data"]["info"]
            url = WORKERS.format(info["id"])
            url_hash = hashlib.sha1(url.encode("u8")).hexdigest()
            all_name = info["cnname"] + info["enname"] + info["aliasname"]
            results[url_hash] = {"name": all_name, "url": url, "class": self.__class__.__name__}
        logging.info("[%s] Offline resource search complete", self.__class__.__name__)

        comments = self.db["comment"].find({"content": {"$regex": f".*{search_text}.*", "$options": "i"}})
        for c in comments:
            if c["resource_id"] == 233:
                url = DISCUSS.format(str(c["_id"]))
            else:
                # append the comment id as a fragment, matching the url format parsed in search_result
                url = WORKERS.format(c["resource_id"]) + "#" + str(c["_id"])
            url_hash = hashlib.sha1(url.encode("u8")).hexdigest()
            all_name = c["content"]
            results[url_hash] = {"name": all_name, "url": url, "class": self.__class__.__name__}
        logging.info("[%s] Offline comment search complete", self.__class__.__name__)

        results["class"] = self.__class__.__name__
        return results

    @Redis.result_cache(10 * 60)
    def search_result(self, resource_url) -> dict:
        # yyets offline
        # resource: https://yyets.click/resource.html?id=37089
        # comment: https://yyets.click/resource.html?id=233#61893ae51e9152e43fa24124
        if "#" in resource_url:
            cid = resource_url.split("#")[1]
            data: dict = self.db["comment"].find_one(
                {"_id": ObjectId(cid)}, {"_id": False, "ip": False, "type": False, "children": False, "browser": False}
            )
            share = resource_url
            name = f"{data['username']} 的分享"  # "{username}'s share"
            t = "comment"
        else:
            rid = resource_url.split("id=")[1]
            data: dict = self.collection.find_one({"data.info.id": int(rid)}, {"_id": False})
            name = data["data"]["info"]["cnname"]
            share = WORKERS.format(rid)
            t = "resource"
        return {"all": json.dumps(data, ensure_ascii=False, indent=4), "share": share, "cnname": name, "type": t}

    def __del__(self):
        self.mongo.close()


class ZimuxiaOnline(BaseFansub):
    @Redis.preview_cache()
    def search_preview(self, search_text: str) -> dict:
        # zimuxia online
        search_url = FIX_SEARCH.format(kw=search_text)
        html_text = self.get_html(search_url)
        logging.info("[%s] Parsing html...", self.__class__.__name__)
        soup = BeautifulSoup(html_text, "html.parser")
        link_list = soup.find_all("h2", class_="post-title")
        dict_result = {}
        for link in link_list:
            # TODO wordpress searches both content and title; in some cases that is troublesome
            url = link.a["href"]
            url_hash = hashlib.sha1(url.encode("u8")).hexdigest()
            name = link.a.text
            dict_result[url_hash] = {"url": url, "name": name, "class": self.__class__.__name__}
        dict_result["class"] = self.__class__.__name__
        return dict_result

    @Redis.result_cache()
    def search_result(self, resource_url: str) -> dict:
        # zimuxia online
        logging.info("[%s] Loading detail page %s", self.__class__.__name__, resource_url)
        html = self.get_html(resource_url)
        soup = BeautifulSoup(html, "html.parser")
        cnname = soup.title.text.split("|")[0]
        return {"all": html, "share": resource_url, "cnname": cnname, "type": "resource"}


class ZhuixinfanOnline(BaseFansub):
    @Redis.preview_cache()
    def search_preview(self, search_text: str) -> dict:
        # zhuixinfan online
        search_link = ZHUIXINFAN_SEARCH.format(search_text)
        html_text = self.get_html(search_link)
        logging.info("[%s] Parsing html...", self.__class__.__name__)
        soup = BeautifulSoup(html_text, "html.parser")
        link_list = soup.find_all("ul", class_="resource_list")
        dict_result = {}
        for li in link_list:
            for link in li:
                with contextlib.suppress(AttributeError):
                    name = link.dd.text
                    url = ZHUIXINFAN_RESOURCE.format(link.dd.a["href"])
                    url_hash = hashlib.sha1(url.encode("u8")).hexdigest()
                    dict_result[url_hash] = {"url": url, "name": name, "class": self.__class__.__name__}
        dict_result["class"] = self.__class__.__name__
        return dict_result

    @Redis.result_cache()
    def search_result(self, url: str) -> dict:
        # zhuixinfan online
        # the incoming hash has already been resolved to the real url by result_cache
        logging.info("[%s] Loading detail page %s", self.__class__.__name__, url)
        html = self.get_html(url, "utf-8")
        # parse the page to extract cnname and other info
        soup = BeautifulSoup(html, "html.parser")
        cnname = soup.title.text.split("_")[0]
        return {"all": html, "share": url, "cnname": cnname, "type": "resource"}


class NewzmzOnline(BaseFansub):
    @Redis.preview_cache()
    def search_preview(self, search_text: str) -> dict:
        # newzmz online
        search_link = NEWZMZ_SEARCH.format(search_text)
        html_text = self.get_html(search_link)
        search_response = json.loads(html_text)
        dict_result = {}
        for item in search_response["data"]:
            url = NEWZMZ_RESOURCE.format(item["link_url"].split("-")[1])
            url_hash = hashlib.sha1(url.encode("u8")).hexdigest()
            dict_result[url_hash] = {
                "url": url,
                "name": item["name"] + item["name_eng"],
                "class": self.__class__.__name__,
            }
        dict_result["class"] = self.__class__.__name__
        return dict_result

    @Redis.result_cache()
    def search_result(self, url: str) -> dict:
        logging.info("[%s] Loading detail page %s", self.__class__.__name__, url)
        html = self.get_html(url)
        # parse the page to extract cnname and other info
        soup = BeautifulSoup(html, "html.parser")
        cnname = soup.title.text.split("-")[0]
        return {"all": html, "share": url, "cnname": cnname, "type": "resource"}


class BD2020(NewzmzOnline):
    @Redis.preview_cache()
    def search_preview(self, search_text: str) -> dict:
        search_link = BD2020_SEARCH.format(search_text)
        html_text = self.get_html(search_link)
        logging.info("[%s] Parsing html...", self.__class__.__name__)
        soup = BeautifulSoup(html_text, "html.parser")
        link_list = soup.find_all("li", class_="list-item")
        dict_result = {}
        for item in link_list:
            name = item.div.a.text.strip()
            url = item.div.a["href"]
            url_hash = hashlib.sha1(url.encode("u8")).hexdigest()
            dict_result[url_hash] = {"url": url, "name": name, "class": self.__class__.__name__}
        dict_result["class"] = self.__class__.__name__
        return dict_result


class XL720(BD2020):
    @Redis.preview_cache()
    def search_preview(self, search_text: str) -> dict:
        search_link = XL720_SEARCH.format(search_text)
        html_text = self.get_html(search_link)
        logging.info("[%s] Parsing html...", self.__class__.__name__)
        soup = BeautifulSoup(html_text, "html.parser")
        dict_result = {}
        link_list = soup.find_all("div", class_="post clearfix")
        for item in link_list:
            name = re.sub(r"\s", "", item.h3.a.text.strip())
            url = item.h3.a["href"]
            url_hash = hashlib.sha1(url.encode("u8")).hexdigest()
            dict_result[url_hash] = {"url": url, "name": name, "class": self.__class__.__name__}

        dict_result["class"] = self.__class__.__name__
        return dict_result

    @Redis.result_cache()
    def search_result(self, url: str) -> dict:
        logging.info("[%s] Loading detail page %s", self.__class__.__name__, url)
        html = self.get_html(url)
        soup = BeautifulSoup(html, "html.parser")
        # the title looks like "<name>迅雷下载..."; keep everything before "迅雷下载" ("Thunder download")
        cnname = soup.title.text.split("迅雷下载")[0]
        return {"all": html, "share": url, "cnname": cnname, "type": "resource"}


class FansubEntrance(BaseFansub):
    order = FANSUB_ORDER.split(",")

    def search_preview(self, search_text: str) -> dict:
        # placeholder class name; the Chinese reads "the smart, witty, gentle, cute and kind Benny"
        class_ = "聪明机智温柔可爱善良的Benny"
        for sub_str in self.order:
            logging.info("Looping through %s", sub_str)
            fc = globals().get(sub_str)
            result = fc().search_preview(search_text)
            # the result carries a "class" key, so pop it now and re-add it only on a hit
            class_ = result.pop("class")
            if result:
                logging.info("Result hit in %s %s", sub_str, fc)
                FansubEntrance.fansub_class = fc
                result["class"] = class_
                return result
        return {"class": class_}

    def search_result(self, resource_url_hash: str) -> dict:
        # entrance: resolve the hash stored by preview_cache back to its url and class
        cache_data = self.redis.hgetall(resource_url_hash)
        resource_url = cache_data["url"]
        class_name = cache_data["class"]
        fc = globals().get(class_name)
        return fc().search_result(resource_url)
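

# Dispatch sketch: the redis hash written by preview_cache makes each button
# callback self-describing, e.g. (illustrative values)
#   cache_data = {"url": "https://www.zimuxia.cn/portfolio/foo", "name": "foo", "class": "ZimuxiaOnline"}
# so globals().get("ZimuxiaOnline")().search_result(url) lands in the right subclass.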

# Verify that FANSUB_ORDER is correct. This must run here, after all fansub classes are defined.
for fs in FANSUB_ORDER.split(","):
    if globals().get(fs) is None:
        raise NameError(f"FANSUB_ORDER is incorrect! {fs}")


# Telegram command names may only contain Latin letters, numbers and underscores, e.g. yyets_offline
def class_to_tg(sub_class: str):
    trans = {"Online": "_online", "Offline": "_offline"}
    for upper, lower in trans.items():
        sub_class = sub_class.replace(upper, lower)
    return sub_class.lower()
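
# e.g. class_to_tg("YYeTsOffline") == "yyets_offline", class_to_tg("ZimuxiaOnline") == "zimuxia_online"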

for sub_name in globals().copy():
    if sub_name.endswith("Offline") or sub_name.endswith("Online"):
        cmd_name = class_to_tg(sub_name)
        m = getattr(this_module, sub_name)
        logging.info("Mapping %s to %s", cmd_name, m)
        # expose each class under its command-style alias, e.g. yyets_offline = YYeTsOffline
        vars()[cmd_name] = m


if __name__ == "__main__":
    sub = ZimuxiaOnline()
    search = sub.search_preview("最爱")
    print(search)
    # sha1 hash of a url returned by a previous search_preview run
    uh = "4bcd33af9be2fba0060c388e984c7a2509e7e654"
    result = sub.search_result(uh)
    print(json.dumps(result, ensure_ascii=False))