# -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/bilibili/core.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# Disclaimer: this code is for learning and research purposes only. Users must observe the following principles:
# 1. It must not be used for any commercial purpose.
# 2. Usage must comply with the target platform's terms of service and robots.txt rules.
# 3. No large-scale crawling or interference with the platform's operation.
# 4. Request rates must be kept reasonable to avoid placing unnecessary load on the target platform.
# 5. It must not be used for any illegal or otherwise improper purpose.
#
# See the LICENSE file in the project root for the full license terms.
# By using this code you agree to the principles above and to all terms of the LICENSE.
# @Author : relakkes@gmail.com
# @Time : 2023/12/2 18:44
# @Desc : Bilibili crawler
import asyncio
import os
# import random # Removed as we now use fixed config.CRAWLER_MAX_SLEEP_SEC intervals
from asyncio import Task
from typing import Dict, List, Optional, Tuple, Union
from datetime import datetime, timedelta
import pandas as pd
from playwright.async_api import (
BrowserContext,
BrowserType,
Page,
Playwright,
async_playwright,
)
from playwright._impl._errors import TargetClosedError
import config
from base.base_crawler import AbstractCrawler
from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
from store import bilibili as bilibili_store
from tools import utils
from tools.cdp_browser import CDPBrowserManager
from var import crawler_type_var, source_keyword_var
from .client import BilibiliClient
from .exception import DataFetchError
from .field import SearchOrderType
from .help import parse_video_info_from_url, parse_creator_info_from_url
from .login import BilibiliLogin


class BilibiliCrawler(AbstractCrawler):
context_page: Page
bili_client: BilibiliClient
browser_context: BrowserContext
cdp_manager: Optional[CDPBrowserManager]
def __init__(self):
self.index_url = "https://www.bilibili.com"
self.user_agent = utils.get_user_agent()
self.cdp_manager = None
async def start(self):
playwright_proxy_format, httpx_proxy_format = None, None
if config.ENABLE_IP_PROXY:
ip_proxy_pool = await create_ip_pool(config.IP_PROXY_POOL_COUNT, enable_validate_ip=True)
ip_proxy_info: IpInfoModel = await ip_proxy_pool.get_proxy()
playwright_proxy_format, httpx_proxy_format = utils.format_proxy_info(ip_proxy_info)
async with async_playwright() as playwright:
            # Choose the browser launch mode based on configuration
if config.ENABLE_CDP_MODE:
utils.logger.info("[BilibiliCrawler] 使用CDP模式启动浏览器")
self.browser_context = await self.launch_browser_with_cdp(
playwright,
playwright_proxy_format,
self.user_agent,
headless=config.CDP_HEADLESS,
)
else:
utils.logger.info("[BilibiliCrawler] 使用标准模式启动浏览器")
# Launch a browser context.
chromium = playwright.chromium
self.browser_context = await self.launch_browser(chromium, None, self.user_agent, headless=config.HEADLESS)
# stealth.min.js is a js script to prevent the website from detecting the crawler.
await self.browser_context.add_init_script(path="libs/stealth.min.js")
self.context_page = await self.browser_context.new_page()
await self.context_page.goto(self.index_url)
            # Create a client to interact with the bilibili website.
self.bili_client = await self.create_bilibili_client(httpx_proxy_format)
if not await self.bili_client.pong():
login_obj = BilibiliLogin(
login_type=config.LOGIN_TYPE,
login_phone="", # your phone number
browser_context=self.browser_context,
context_page=self.context_page,
cookie_str=config.COOKIES,
)
await login_obj.begin()
await self.bili_client.update_cookies(browser_context=self.browser_context)
crawler_type_var.set(config.CRAWLER_TYPE)
if config.CRAWLER_TYPE == "search":
await self.search()
elif config.CRAWLER_TYPE == "detail":
# Get the information and comments of the specified post
await self.get_specified_videos(config.BILI_SPECIFIED_ID_LIST)
elif config.CRAWLER_TYPE == "creator":
if config.CREATOR_MODE:
for creator_url in config.BILI_CREATOR_ID_LIST:
try:
creator_info = parse_creator_info_from_url(creator_url)
utils.logger.info(f"[BilibiliCrawler.start] Parsed creator ID: {creator_info.creator_id} from {creator_url}")
await self.get_creator_videos(int(creator_info.creator_id))
except ValueError as e:
utils.logger.error(f"[BilibiliCrawler.start] Failed to parse creator URL: {e}")
continue
else:
await self.get_all_creator_details(config.BILI_CREATOR_ID_LIST)
else:
pass
utils.logger.info("[BilibiliCrawler.start] Bilibili Crawler finished ...")
async def search(self):
"""
search bilibili video
"""
# Search for video and retrieve their comment information.
if config.BILI_SEARCH_MODE == "normal":
await self.search_by_keywords()
elif config.BILI_SEARCH_MODE == "all_in_time_range":
await self.search_by_keywords_in_time_range(daily_limit=False)
elif config.BILI_SEARCH_MODE == "daily_limit_in_time_range":
await self.search_by_keywords_in_time_range(daily_limit=True)
else:
utils.logger.warning(f"Unknown BILI_SEARCH_MODE: {config.BILI_SEARCH_MODE}")
@staticmethod
async def get_pubtime_datetime(
start: str = config.START_DAY,
end: str = config.END_DAY,
) -> Tuple[str, str]:
"""
获取 bilibili 作品发布日期起始时间戳 pubtime_begin_s 与发布日期结束时间戳 pubtime_end_s
---
:param start: 发布日期起始时间YYYY-MM-DD
:param end: 发布日期结束时间YYYY-MM-DD
2025-07-18 23:26:52 +08:00
Note
---
- 搜索的时间范围为 start end包含 start end
- 若要搜索同一天的内容为了包含 start 当天的搜索内容 pubtime_end_s 的值应该为 pubtime_begin_s 的值加上一天再减去一秒 start 当天的最后一秒
- 如仅搜索 2024-01-05 的内容pubtime_begin_s = 1704384000pubtime_end_s = 1704470399
转换为可读的 datetime 对象pubtime_begin_s = datetime.datetime(2024, 1, 5, 0, 0)pubtime_end_s = datetime.datetime(2024, 1, 5, 23, 59, 59)
- 若要搜索 start end 的内容为了包含 end 当天的搜索内容 pubtime_end_s 的值应该为 pubtime_end_s 的值加上一天再减去一秒 end 当天的最后一秒
- 如搜索 2024-01-05 - 2024-01-06 的内容pubtime_begin_s = 1704384000pubtime_end_s = 1704556799
转换为可读的 datetime 对象pubtime_begin_s = datetime.datetime(2024, 1, 5, 0, 0)pubtime_end_s = datetime.datetime(2024, 1, 6, 23, 59, 59)
"""
        # Convert start and end to datetime objects
start_day: datetime = datetime.strptime(start, "%Y-%m-%d")
end_day: datetime = datetime.strptime(end, "%Y-%m-%d")
        if start_day > end_day:
            raise ValueError("Invalid time range: start must not be later than end")
        elif start_day == end_day:  # searching a single day
            end_day = start_day + timedelta(days=1) - timedelta(seconds=1)  # end_day = start_day + 1 day - 1 second
        else:  # searching from start to end
            end_day = end_day + timedelta(days=1) - timedelta(seconds=1)  # end_day = end_day + 1 day - 1 second
        # Convert back to Unix timestamps
        return str(int(start_day.timestamp())), str(int(end_day.timestamp()))
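
    # Illustrative example (values taken from the docstring above):
    #   get_pubtime_datetime("2024-01-05", "2024-01-05") == ("1704384000", "1704470399")
    #   i.e. 2024-01-05 00:00:00 through 2024-01-05 23:59:59 (UTC+8).
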
async def search_by_keywords(self):
"""
search bilibili video with keywords in normal mode
:return:
"""
utils.logger.info("[BilibiliCrawler.search_by_keywords] Begin search bilibli keywords")
        bili_limit_count = 20  # bilibili's fixed page size
if config.CRAWLER_MAX_NOTES_COUNT < bili_limit_count:
config.CRAWLER_MAX_NOTES_COUNT = bili_limit_count
start_page = config.START_PAGE # start page number
for keyword in config.KEYWORDS.split(","):
source_keyword_var.set(keyword)
utils.logger.info(f"[BilibiliCrawler.search_by_keywords] Current search keyword: {keyword}")
page = 1
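            # Stop paging once (pages fetched since START_PAGE) * bili_limit_count
            # results would exceed CRAWLER_MAX_NOTES_COUNT (20 results per page).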
while (page - start_page + 1) * bili_limit_count <= config.CRAWLER_MAX_NOTES_COUNT:
if page < start_page:
utils.logger.info(f"[BilibiliCrawler.search_by_keywords] Skip page: {page}")
page += 1
continue
utils.logger.info(f"[BilibiliCrawler.search_by_keywords] search bilibili keyword: {keyword}, page: {page}")
video_id_list: List[str] = []
videos_res = await self.bili_client.search_video_by_keyword(
keyword=keyword,
page=page,
page_size=bili_limit_count,
order=SearchOrderType.DEFAULT,
                    pubtime_begin_s=0,  # publish-date range start timestamp
                    pubtime_end_s=0,  # publish-date range end timestamp
)
video_list: List[Dict] = videos_res.get("result")
if not video_list:
utils.logger.info(f"[BilibiliCrawler.search_by_keywords] No more videos for '{keyword}', moving to next keyword.")
break
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = []
try:
task_list = [self.get_video_info_task(aid=video_item.get("aid"), bvid="", semaphore=semaphore) for video_item in video_list]
except Exception as e:
utils.logger.warning(f"[BilibiliCrawler.search_by_keywords] error in the task list. The video for this page will not be included. {e}")
video_items = await asyncio.gather(*task_list)
for video_item in video_items:
if video_item:
video_id_list.append(video_item.get("View").get("aid"))
await bilibili_store.update_bilibili_video(video_item)
await bilibili_store.update_up_info(video_item)
await self.get_bilibili_video(video_item, semaphore)
page += 1
# Sleep after page navigation
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[BilibiliCrawler.search_by_keywords] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after page {page-1}")
await self.batch_get_video_comments(video_id_list)

    async def search_by_keywords_in_time_range(self, daily_limit: bool):
"""
Search bilibili video with keywords in a given time range.
:param daily_limit: if True, strictly limit the number of notes per day and total.
"""
utils.logger.info(f"[BilibiliCrawler.search_by_keywords_in_time_range] Begin search with daily_limit={daily_limit}")
bili_limit_count = 20
start_page = config.START_PAGE
for keyword in config.KEYWORDS.split(","):
source_keyword_var.set(keyword)
utils.logger.info(f"[BilibiliCrawler.search_by_keywords_in_time_range] Current search keyword: {keyword}")
total_notes_crawled_for_keyword = 0
for day in pd.date_range(start=config.START_DAY, end=config.END_DAY, freq="D"):
                if total_notes_crawled_for_keyword >= config.CRAWLER_MAX_NOTES_COUNT:
                    utils.logger.info(f"[BilibiliCrawler.search_by_keywords_in_time_range] Reached CRAWLER_MAX_NOTES_COUNT limit for keyword '{keyword}', skipping remaining days.")
                    break
pubtime_begin_s, pubtime_end_s = await self.get_pubtime_datetime(start=day.strftime("%Y-%m-%d"), end=day.strftime("%Y-%m-%d"))
page = 1
notes_count_this_day = 0
while True:
                    if notes_count_this_day >= config.MAX_NOTES_PER_DAY:
                        utils.logger.info(f"[BilibiliCrawler.search_by_keywords_in_time_range] Reached MAX_NOTES_PER_DAY limit for {day.ctime()}.")
                        break
                    if total_notes_crawled_for_keyword >= config.CRAWLER_MAX_NOTES_COUNT:
                        utils.logger.info(f"[BilibiliCrawler.search_by_keywords_in_time_range] Reached CRAWLER_MAX_NOTES_COUNT limit for keyword '{keyword}'.")
                        break
try:
utils.logger.info(f"[BilibiliCrawler.search] search bilibili keyword: {keyword}, date: {day.ctime()}, page: {page}")
video_id_list: List[str] = []
videos_res = await self.bili_client.search_video_by_keyword(
keyword=keyword,
page=page,
page_size=bili_limit_count,
order=SearchOrderType.DEFAULT,
pubtime_begin_s=pubtime_begin_s,
pubtime_end_s=pubtime_end_s,
)
video_list: List[Dict] = videos_res.get("result")
if not video_list:
utils.logger.info(f"[BilibiliCrawler.search] No more videos for '{keyword}' on {day.ctime()}, moving to next day.")
break
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = [self.get_video_info_task(aid=video_item.get("aid"), bvid="", semaphore=semaphore) for video_item in video_list]
video_items = await asyncio.gather(*task_list)
for video_item in video_items:
if video_item:
                                if total_notes_crawled_for_keyword >= config.CRAWLER_MAX_NOTES_COUNT:
                                    break
                                if notes_count_this_day >= config.MAX_NOTES_PER_DAY:
                                    break
notes_count_this_day += 1
total_notes_crawled_for_keyword += 1
video_id_list.append(video_item.get("View").get("aid"))
await bilibili_store.update_bilibili_video(video_item)
await bilibili_store.update_up_info(video_item)
await self.get_bilibili_video(video_item, semaphore)
page += 1
# Sleep after page navigation
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[BilibiliCrawler.search_by_keywords_in_time_range] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after page {page-1}")
await self.batch_get_video_comments(video_id_list)
except Exception as e:
utils.logger.error(f"[BilibiliCrawler.search] Error searching on {day.ctime()}: {e}")
break
async def batch_get_video_comments(self, video_id_list: List[str]):
"""
batch get video comments
:param video_id_list:
:return:
"""
if not config.ENABLE_GET_COMMENTS:
utils.logger.info(f"[BilibiliCrawler.batch_get_note_comments] Crawling comment mode is not enabled")
return
utils.logger.info(f"[BilibiliCrawler.batch_get_video_comments] video ids:{video_id_list}")
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list: List[Task] = []
for video_id in video_id_list:
task = asyncio.create_task(self.get_comments(video_id, semaphore), name=video_id)
task_list.append(task)
await asyncio.gather(*task_list)

    async def get_comments(self, video_id: str, semaphore: asyncio.Semaphore):
"""
get comment for video id
:param video_id:
:param semaphore:
:return:
"""
async with semaphore:
try:
utils.logger.info(f"[BilibiliCrawler.get_comments] begin get video_id: {video_id} comments ...")
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[BilibiliCrawler.get_comments] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after fetching comments for video {video_id}")
await self.bili_client.get_video_all_comments(
video_id=video_id,
crawl_interval=config.CRAWLER_MAX_SLEEP_SEC,
is_fetch_sub_comments=config.ENABLE_GET_SUB_COMMENTS,
callback=bilibili_store.batch_update_bilibili_video_comments,
max_count=config.CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES,
)
except DataFetchError as ex:
utils.logger.error(f"[BilibiliCrawler.get_comments] get video_id: {video_id} comment error: {ex}")
except Exception as e:
utils.logger.error(f"[BilibiliCrawler.get_comments] may be been blocked, err:{e}")
# Propagate the exception to be caught by the main loop
raise
async def get_creator_videos(self, creator_id: int):
"""
get videos for a creator
:return:
"""
        ps = 30  # page size
        pn = 1  # page number
while True:
result = await self.bili_client.get_creator_videos(creator_id, pn, ps)
video_bvids_list = [video["bvid"] for video in result["list"]["vlist"]]
await self.get_specified_videos(video_bvids_list)
if int(result["page"]["count"]) <= pn * ps:
break
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[BilibiliCrawler.get_creator_videos] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after page {pn}")
pn += 1
async def get_specified_videos(self, video_url_list: List[str]):
"""
get specified videos info from URLs or BV IDs
:param video_url_list: List of video URLs or BV IDs
:return:
"""
utils.logger.info("[BilibiliCrawler.get_specified_videos] Parsing video URLs...")
bvids_list = []
for video_url in video_url_list:
try:
video_info = parse_video_info_from_url(video_url)
bvids_list.append(video_info.video_id)
utils.logger.info(f"[BilibiliCrawler.get_specified_videos] Parsed video ID: {video_info.video_id} from {video_url}")
except ValueError as e:
utils.logger.error(f"[BilibiliCrawler.get_specified_videos] Failed to parse video URL: {e}")
continue
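
        # Illustrative inputs (hypothetical ids): a full URL like
        # "https://www.bilibili.com/video/BV1xxxxxxxxx" and a bare "BV1xxxxxxxxx"
        # both parse to the same video_id via parse_video_info_from_url.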
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list = [self.get_video_info_task(aid=0, bvid=video_id, semaphore=semaphore) for video_id in bvids_list]
video_details = await asyncio.gather(*task_list)
video_aids_list = []
for video_detail in video_details:
if video_detail is not None:
video_item_view: Dict = video_detail.get("View")
video_aid: str = video_item_view.get("aid")
if video_aid:
video_aids_list.append(video_aid)
await bilibili_store.update_bilibili_video(video_detail)
await bilibili_store.update_up_info(video_detail)
await self.get_bilibili_video(video_detail, semaphore)
await self.batch_get_video_comments(video_aids_list)
async def get_video_info_task(self, aid: int, bvid: str, semaphore: asyncio.Semaphore) -> Optional[Dict]:
"""
Get video detail task
:param aid:
:param bvid:
:param semaphore:
:return:
"""
async with semaphore:
try:
result = await self.bili_client.get_video_info(aid=aid, bvid=bvid)
# Sleep after fetching video details
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[BilibiliCrawler.get_video_info_task] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after fetching video details {bvid or aid}")
return result
except DataFetchError as ex:
utils.logger.error(f"[BilibiliCrawler.get_video_info_task] Get video detail error: {ex}")
return None
except KeyError as ex:
utils.logger.error(f"[BilibiliCrawler.get_video_info_task] have not fund note detail video_id:{bvid}, err: {ex}")
return None
async def get_video_play_url_task(self, aid: int, cid: int, semaphore: asyncio.Semaphore) -> Union[Dict, None]:
"""
Get video play url
:param aid:
:param cid:
:param semaphore:
:return:
"""
async with semaphore:
try:
result = await self.bili_client.get_video_play_url(aid=aid, cid=cid)
return result
except DataFetchError as ex:
utils.logger.error(f"[BilibiliCrawler.get_video_play_url_task] Get video play url error: {ex}")
return None
except KeyError as ex:
utils.logger.error(f"[BilibiliCrawler.get_video_play_url_task] have not fund play url from :{aid}|{cid}, err: {ex}")
return None

    async def create_bilibili_client(self, httpx_proxy: Optional[str]) -> BilibiliClient:
"""
create bilibili client
:param httpx_proxy: httpx proxy
:return: bilibili client
"""
utils.logger.info("[BilibiliCrawler.create_bilibili_client] Begin create bilibili API client ...")
cookie_str, cookie_dict = utils.convert_cookies(await self.browser_context.cookies())
bilibili_client_obj = BilibiliClient(
proxy=httpx_proxy,
headers={
"User-Agent": self.user_agent,
"Cookie": cookie_str,
"Origin": "https://www.bilibili.com",
"Referer": "https://www.bilibili.com",
"Content-Type": "application/json;charset=UTF-8",
},
playwright_page=self.context_page,
cookie_dict=cookie_dict,
)
return bilibili_client_obj

    async def launch_browser(
self,
chromium: BrowserType,
playwright_proxy: Optional[Dict],
user_agent: Optional[str],
headless: bool = True,
2023-12-03 23:19:02 +08:00
) -> BrowserContext:
"""
launch browser and create browser context
:param chromium: chromium browser
:param playwright_proxy: playwright proxy
:param user_agent: user agent
:param headless: headless mode
:return: browser context
"""
utils.logger.info("[BilibiliCrawler.launch_browser] Begin create browser context ...")
if config.SAVE_LOGIN_STATE:
# feat issue #14
# we will save login state to avoid login every time
user_data_dir = os.path.join(os.getcwd(), "browser_data", config.USER_DATA_DIR % config.PLATFORM) # type: ignore
browser_context = await chromium.launch_persistent_context(
user_data_dir=user_data_dir,
accept_downloads=True,
headless=headless,
proxy=playwright_proxy, # type: ignore
viewport={
"width": 1920,
"height": 1080
},
user_agent=user_agent,
channel="chrome", # 使用系统的Chrome稳定版
)
return browser_context
else:
browser = await chromium.launch(headless=headless, proxy=playwright_proxy, channel="chrome")
browser_context = await browser.new_context(viewport={"width": 1920, "height": 1080}, user_agent=user_agent)
return browser_context
async def launch_browser_with_cdp(
self,
playwright: Playwright,
playwright_proxy: Optional[Dict],
user_agent: Optional[str],
headless: bool = True,
) -> BrowserContext:
"""
        Launch the browser in CDP mode
"""
try:
self.cdp_manager = CDPBrowserManager()
browser_context = await self.cdp_manager.launch_and_connect(
playwright=playwright,
playwright_proxy=playwright_proxy,
user_agent=user_agent,
headless=headless,
)
            # Log browser info
browser_info = await self.cdp_manager.get_browser_info()
utils.logger.info(f"[BilibiliCrawler] CDP浏览器信息: {browser_info}")
return browser_context
except Exception as e:
utils.logger.error(f"[BilibiliCrawler] CDP模式启动失败回退到标准模式: {e}")
# 回退到标准模式
chromium = playwright.chromium
return await self.launch_browser(chromium, playwright_proxy, user_agent, headless)

    async def close(self):
"""Close browser context"""
try:
            # CDP mode requires special cleanup handling
if self.cdp_manager:
await self.cdp_manager.cleanup()
self.cdp_manager = None
elif self.browser_context:
await self.browser_context.close()
utils.logger.info("[BilibiliCrawler.close] Browser context closed ...")
except TargetClosedError:
utils.logger.warning("[BilibiliCrawler.close] Browser context was already closed.")
except Exception as e:
utils.logger.error(f"[BilibiliCrawler.close] An error occurred during close: {e}")
async def get_bilibili_video(self, video_item: Dict, semaphore: asyncio.Semaphore):
"""
download bilibili video
:param video_item:
:param semaphore:
:return:
"""
if not config.ENABLE_GET_MEIDAS:
utils.logger.info(f"[BilibiliCrawler.get_bilibili_video] Crawling image mode is not enabled")
return
video_item_view: Dict = video_item.get("View")
aid = video_item_view.get("aid")
cid = video_item_view.get("cid")
result = await self.get_video_play_url_task(aid, cid, semaphore)
if result is None:
utils.logger.info("[BilibiliCrawler.get_bilibili_video] get video play url failed")
return
durl_list = result.get("durl")
max_size = -1
video_url = ""
for durl in durl_list:
size = durl.get("size")
if size > max_size:
max_size = size
video_url = durl.get("url")
if video_url == "":
utils.logger.info("[BilibiliCrawler.get_bilibili_video] get video url failed")
return
content = await self.bili_client.get_video_media(video_url)
await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
utils.logger.info(f"[BilibiliCrawler.get_bilibili_video] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after fetching video {aid}")
if content is None:
return
extension_file_name = f"video.mp4"
await bilibili_store.store_video(aid, content, extension_file_name)
async def get_all_creator_details(self, creator_url_list: List[str]):
"""
        :param creator_url_list: list of creator homepage URLs to fetch creator details from
"""
utils.logger.info(f"[BilibiliCrawler.get_all_creator_details] Crawling the details of creators")
utils.logger.info(f"[BilibiliCrawler.get_all_creator_details] Parsing creator URLs...")
creator_id_list = []
for creator_url in creator_url_list:
try:
creator_info = parse_creator_info_from_url(creator_url)
creator_id_list.append(int(creator_info.creator_id))
utils.logger.info(f"[BilibiliCrawler.get_all_creator_details] Parsed creator ID: {creator_info.creator_id} from {creator_url}")
except ValueError as e:
utils.logger.error(f"[BilibiliCrawler.get_all_creator_details] Failed to parse creator URL: {e}")
continue
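        # Illustrative input (hypothetical uid): "https://space.bilibili.com/123456"
        # parses to creator_id 123456 via parse_creator_info_from_url.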
utils.logger.info(f"[BilibiliCrawler.get_all_creator_details] creator ids:{creator_id_list}")
semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
task_list: List[Task] = []
try:
for creator_id in creator_id_list:
task = asyncio.create_task(self.get_creator_details(creator_id, semaphore), name=str(creator_id))
task_list.append(task)
except Exception as e:
utils.logger.warning(f"[BilibiliCrawler.get_all_creator_details] error in the task list. The creator will not be included. {e}")
await asyncio.gather(*task_list)

    async def get_creator_details(self, creator_id: int, semaphore: asyncio.Semaphore):
"""
get details for creator id
:param creator_id:
:param semaphore:
:return:
"""
async with semaphore:
creator_unhandled_info: Dict = await self.bili_client.get_creator_info(creator_id)
creator_info: Dict = {
"id": creator_id,
"name": creator_unhandled_info.get("name"),
"sign": creator_unhandled_info.get("sign"),
"avatar": creator_unhandled_info.get("face"),
}
await self.get_fans(creator_info, semaphore)
await self.get_followings(creator_info, semaphore)
await self.get_dynamics(creator_info, semaphore)
async def get_fans(self, creator_info: Dict, semaphore: asyncio.Semaphore):
"""
get fans for creator id
:param creator_info:
:param semaphore:
:return:
"""
creator_id = creator_info["id"]
async with semaphore:
try:
utils.logger.info(f"[BilibiliCrawler.get_fans] begin get creator_id: {creator_id} fans ...")
await self.bili_client.get_creator_all_fans(
creator_info=creator_info,
crawl_interval=config.CRAWLER_MAX_SLEEP_SEC,
callback=bilibili_store.batch_update_bilibili_creator_fans,
max_count=config.CRAWLER_MAX_CONTACTS_COUNT_SINGLENOTES,
)
except DataFetchError as ex:
utils.logger.error(f"[BilibiliCrawler.get_fans] get creator_id: {creator_id} fans error: {ex}")
except Exception as e:
utils.logger.error(f"[BilibiliCrawler.get_fans] may be been blocked, err:{e}")
async def get_followings(self, creator_info: Dict, semaphore: asyncio.Semaphore):
"""
get followings for creator id
:param creator_info:
:param semaphore:
:return:
"""
creator_id = creator_info["id"]
async with semaphore:
try:
utils.logger.info(f"[BilibiliCrawler.get_followings] begin get creator_id: {creator_id} followings ...")
await self.bili_client.get_creator_all_followings(
creator_info=creator_info,
crawl_interval=config.CRAWLER_MAX_SLEEP_SEC,
callback=bilibili_store.batch_update_bilibili_creator_followings,
max_count=config.CRAWLER_MAX_CONTACTS_COUNT_SINGLENOTES,
)
except DataFetchError as ex:
utils.logger.error(f"[BilibiliCrawler.get_followings] get creator_id: {creator_id} followings error: {ex}")
except Exception as e:
utils.logger.error(f"[BilibiliCrawler.get_followings] may be been blocked, err:{e}")
async def get_dynamics(self, creator_info: Dict, semaphore: asyncio.Semaphore):
"""
get dynamics for creator id
:param creator_info:
:param semaphore:
:return:
"""
creator_id = creator_info["id"]
async with semaphore:
try:
utils.logger.info(f"[BilibiliCrawler.get_dynamics] begin get creator_id: {creator_id} dynamics ...")
await self.bili_client.get_creator_all_dynamics(
creator_info=creator_info,
crawl_interval=config.CRAWLER_MAX_SLEEP_SEC,
callback=bilibili_store.batch_update_bilibili_creator_dynamics,
max_count=config.CRAWLER_MAX_DYNAMICS_COUNT_SINGLENOTES,
)
except DataFetchError as ex:
utils.logger.error(f"[BilibiliCrawler.get_dynamics] get creator_id: {creator_id} dynamics error: {ex}")
except Exception as e:
utils.logger.error(f"[BilibiliCrawler.get_dynamics] may be been blocked, err:{e}")