# -*- coding: utf-8 -*-
# Copyright (c) 2025 relakkes@gmail.com
#
# This file is part of MediaCrawler project.
# Repository: https://github.com/NanmiCoder/MediaCrawler/blob/main/media_platform/weibo/core.py
# GitHub: https://github.com/NanmiCoder
# Licensed under NON-COMMERCIAL LEARNING LICENSE 1.1
#
# Disclaimer: this code is for learning and research purposes only. Users must:
# 1. Not use it for any commercial purpose.
# 2. Comply with the target platform's terms of service and robots.txt rules.
# 3. Not crawl at large scale or disrupt the platform's operation.
# 4. Throttle request rates reasonably to avoid placing unnecessary load on the platform.
# 5. Not use it for any illegal or improper purpose.
#
# See the LICENSE file in the project root for the full license terms.
# By using this code you agree to the principles above and all terms in LICENSE.
#
# @Author  : relakkes@gmail.com
# @Time    : 2023/12/23 15:41
# @Desc    : Weibo crawler main workflow
import asyncio
import os
# import random  # Removed: requests now use the fixed config.CRAWLER_MAX_SLEEP_SEC interval
from asyncio import Task
from typing import Dict, List, Optional, Tuple

from playwright.async_api import (
    BrowserContext,
    BrowserType,
    Page,
    Playwright,
    async_playwright,
)

import config
from base.base_crawler import AbstractCrawler
from proxy.proxy_ip_pool import IpInfoModel, create_ip_pool
from store import weibo as weibo_store
from tools import utils
from tools.cdp_browser import CDPBrowserManager
from var import crawler_type_var, source_keyword_var

from .client import WeiboClient
from .exception import DataFetchError
from .field import SearchType
from .help import filter_search_result_card
from .login import WeiboLogin


class WeiboCrawler(AbstractCrawler):
    context_page: Page
    wb_client: WeiboClient
    browser_context: BrowserContext
    cdp_manager: Optional[CDPBrowserManager]

    def __init__(self):
        self.index_url = "https://www.weibo.com"
        self.mobile_index_url = "https://m.weibo.cn"
        self.user_agent = utils.get_user_agent()
        self.mobile_user_agent = utils.get_mobile_user_agent()
        self.cdp_manager = None

    async def start(self):
        playwright_proxy_format, httpx_proxy_format = None, None
        if config.ENABLE_IP_PROXY:
            ip_proxy_pool = await create_ip_pool(config.IP_PROXY_POOL_COUNT, enable_validate_ip=True)
            ip_proxy_info: IpInfoModel = await ip_proxy_pool.get_proxy()
            playwright_proxy_format, httpx_proxy_format = utils.format_proxy_info(ip_proxy_info)

        async with async_playwright() as playwright:
            # Choose the launch mode based on configuration
            if config.ENABLE_CDP_MODE:
                utils.logger.info("[WeiboCrawler] Launching browser in CDP mode")
                self.browser_context = await self.launch_browser_with_cdp(
                    playwright,
                    playwright_proxy_format,
                    self.mobile_user_agent,
                    headless=config.CDP_HEADLESS,
                )
            else:
                utils.logger.info("[WeiboCrawler] Launching browser in standard mode")
                # Launch a browser context.
                chromium = playwright.chromium
                self.browser_context = await self.launch_browser(chromium, None, self.mobile_user_agent, headless=config.HEADLESS)
                # stealth.min.js is a js script to prevent the website from detecting the crawler.
                await self.browser_context.add_init_script(path="libs/stealth.min.js")

            self.context_page = await self.browser_context.new_page()
            await self.context_page.goto(self.index_url)
            await asyncio.sleep(2)

            # Create a client to interact with the weibo website.
            self.wb_client = await self.create_weibo_client(httpx_proxy_format)
            if not await self.wb_client.pong():
                login_obj = WeiboLogin(
                    login_type=config.LOGIN_TYPE,
                    login_phone="",  # your phone number
                    browser_context=self.browser_context,
                    context_page=self.context_page,
                    cookie_str=config.COOKIES,
                )
                await login_obj.begin()
                # After a successful login, redirect to the mobile site so the mobile cookies get refreshed
                utils.logger.info("[WeiboCrawler.start] redirect weibo mobile homepage and update cookies on mobile platform")
                await self.context_page.goto(self.mobile_index_url)
                await asyncio.sleep(3)
                # Fetch only the mobile-site cookies to avoid mixing PC and mobile cookies
                await self.wb_client.update_cookies(
                    browser_context=self.browser_context,
                    urls=[self.mobile_index_url]
                )

            crawler_type_var.set(config.CRAWLER_TYPE)
            if config.CRAWLER_TYPE == "search":
                # Search for notes and retrieve their comment information.
                await self.search()
            elif config.CRAWLER_TYPE == "detail":
                # Get the information and comments of the specified post
                await self.get_specified_notes()
            elif config.CRAWLER_TYPE == "creator":
                # Get creator's information and their notes and comments
                await self.get_creators_and_notes()
            else:
                pass

            utils.logger.info("[WeiboCrawler.start] Weibo Crawler finished ...")

    async def search(self):
        """
        search weibo note with keywords
        :return:
        """
        utils.logger.info("[WeiboCrawler.search] Begin search weibo keywords")
        weibo_limit_count = 10  # Weibo search returns a fixed page size of 10 notes
        if config.CRAWLER_MAX_NOTES_COUNT < weibo_limit_count:
            config.CRAWLER_MAX_NOTES_COUNT = weibo_limit_count
        start_page = config.START_PAGE
        # Set the search type based on the configuration for weibo
        if config.WEIBO_SEARCH_TYPE == "default":
            search_type = SearchType.DEFAULT
        elif config.WEIBO_SEARCH_TYPE == "real_time":
            search_type = SearchType.REAL_TIME
        elif config.WEIBO_SEARCH_TYPE == "popular":
            search_type = SearchType.POPULAR
        elif config.WEIBO_SEARCH_TYPE == "video":
            search_type = SearchType.VIDEO
        else:
            utils.logger.error(f"[WeiboCrawler.search] Invalid WEIBO_SEARCH_TYPE: {config.WEIBO_SEARCH_TYPE}")
            return

        for keyword in config.KEYWORDS.split(","):
            source_keyword_var.set(keyword)
            utils.logger.info(f"[WeiboCrawler.search] Current search keyword: {keyword}")
            page = 1
            while (page - start_page + 1) * weibo_limit_count <= config.CRAWLER_MAX_NOTES_COUNT:
                if page < start_page:
                    utils.logger.info(f"[WeiboCrawler.search] Skip page: {page}")
                    page += 1
                    continue
                utils.logger.info(f"[WeiboCrawler.search] search weibo keyword: {keyword}, page: {page}")
                search_res = await self.wb_client.get_note_by_keyword(keyword=keyword, page=page, search_type=search_type)
                note_id_list: List[str] = []
                note_list = filter_search_result_card(search_res.get("cards"))
                for note_item in note_list:
                    if note_item:
                        mblog: Dict = note_item.get("mblog")
                        if mblog:
                            note_id_list.append(mblog.get("id"))
                            await weibo_store.update_weibo_note(note_item)
                            await self.get_note_images(mblog)

                page += 1

                # Sleep after page navigation
                await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
                utils.logger.info(f"[WeiboCrawler.search] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after page {page - 1}")

                await self.batch_get_notes_comments(note_id_list)
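
    # A worked example of the pagination bound above: each search page yields
    # at most weibo_limit_count = 10 notes, so with START_PAGE = 1 and
    # CRAWLER_MAX_NOTES_COUNT = 30 the condition (page - 1 + 1) * 10 <= 30
    # holds for pages 1 through 3, and the loop exits before page 4.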

    async def get_specified_notes(self):
        """
        get specified notes info
        :return:
        """
        semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
        task_list = [self.get_note_info_task(note_id=note_id, semaphore=semaphore) for note_id in config.WEIBO_SPECIFIED_ID_LIST]
        note_details = await asyncio.gather(*task_list)
        for note_item in note_details:
            if note_item:
                await weibo_store.update_weibo_note(note_item)
        await self.batch_get_notes_comments(config.WEIBO_SPECIFIED_ID_LIST)

    async def get_note_info_task(self, note_id: str, semaphore: asyncio.Semaphore) -> Optional[Dict]:
        """
        Get note detail task
        :param note_id:
        :param semaphore:
        :return:
        """
        async with semaphore:
            try:
                result = await self.wb_client.get_note_info_by_id(note_id)

                # Sleep after fetching note details
                await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
                utils.logger.info(f"[WeiboCrawler.get_note_info_task] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after fetching note details {note_id}")

                return result
            except DataFetchError as ex:
                utils.logger.error(f"[WeiboCrawler.get_note_info_task] Get note detail error: {ex}")
                return None
            except KeyError as ex:
                utils.logger.error(f"[WeiboCrawler.get_note_info_task] note detail not found, note_id: {note_id}, err: {ex}")
                return None

    async def batch_get_notes_comments(self, note_id_list: List[str]):
        """
        batch get notes comments
        :param note_id_list:
        :return:
        """
        if not config.ENABLE_GET_COMMENTS:
            utils.logger.info("[WeiboCrawler.batch_get_notes_comments] Crawling comment mode is not enabled")
            return

        utils.logger.info(f"[WeiboCrawler.batch_get_notes_comments] note ids: {note_id_list}")
        semaphore = asyncio.Semaphore(config.MAX_CONCURRENCY_NUM)
        task_list: List[Task] = []
        for note_id in note_id_list:
            task = asyncio.create_task(self.get_note_comments(note_id, semaphore), name=note_id)
            task_list.append(task)
        await asyncio.gather(*task_list)
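
    # Concurrency note: asyncio.gather() above schedules one task per note id,
    # but each task waits on "async with semaphore" inside get_note_comments(),
    # so at most config.MAX_CONCURRENCY_NUM comment fetches run at once.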

    async def get_note_comments(self, note_id: str, semaphore: asyncio.Semaphore):
        """
        get comment for note id
        :param note_id:
        :param semaphore:
        :return:
        """
        async with semaphore:
            try:
                utils.logger.info(f"[WeiboCrawler.get_note_comments] begin get note_id: {note_id} comments ...")

                # Sleep before fetching comments
                await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
                utils.logger.info(f"[WeiboCrawler.get_note_comments] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds before fetching comments for note {note_id}")

                await self.wb_client.get_note_all_comments(
                    note_id=note_id,
                    crawl_interval=config.CRAWLER_MAX_SLEEP_SEC,  # Use the fixed interval instead of a random one
                    callback=weibo_store.batch_update_weibo_note_comments,
                    max_count=config.CRAWLER_MAX_COMMENTS_COUNT_SINGLENOTES,
                )
            except DataFetchError as ex:
                utils.logger.error(f"[WeiboCrawler.get_note_comments] get note_id: {note_id} comment error: {ex}")
            except Exception as e:
                utils.logger.error(f"[WeiboCrawler.get_note_comments] may have been blocked, err: {e}")
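
    # The fixed config.CRAWLER_MAX_SLEEP_SEC delay is a deliberate design
    # choice (see the removed "import random" note at the top of this file).
    # If randomized pacing were wanted again, a minimal sketch, assuming an
    # illustrative 1.0-second lower bound, would be:
    #
    #   import random
    #   await asyncio.sleep(random.uniform(1.0, config.CRAWLER_MAX_SLEEP_SEC))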

    async def get_note_images(self, mblog: Dict):
        """
        get note images
        :param mblog:
        :return:
        """
        if not config.ENABLE_GET_MEIDAS:
            utils.logger.info("[WeiboCrawler.get_note_images] Crawling image mode is not enabled")
            return

        pics: List[Dict] = mblog.get("pics")
        if not pics:
            return
        for pic in pics:
            url = pic.get("url")
            if not url:
                continue
            content = await self.wb_client.get_note_image(url)
            await asyncio.sleep(config.CRAWLER_MAX_SLEEP_SEC)
            utils.logger.info(f"[WeiboCrawler.get_note_images] Sleeping for {config.CRAWLER_MAX_SLEEP_SEC} seconds after fetching image")
            if content is not None:
                extension_file_name = url.split(".")[-1]
                await weibo_store.update_weibo_note_image(pic["pid"], content, extension_file_name)
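
    # Caveat on the extension handling above: url.split(".")[-1] assumes the
    # image URL ends with a bare extension (".../abc.jpg" -> "jpg"). A URL
    # carrying a query string such as ".../abc.jpg?from=feed" would yield
    # "jpg?from=feed"; a more defensive sketch, using only the standard
    # library, would be:
    #
    #   from urllib.parse import urlparse
    #   extension_file_name = os.path.splitext(urlparse(url).path)[1].lstrip(".")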

    async def get_creators_and_notes(self) -> None:
        """
        Get creator's information and their notes and comments
        Returns:
        """
        utils.logger.info("[WeiboCrawler.get_creators_and_notes] Begin get weibo creators")
        for user_id in config.WEIBO_CREATOR_ID_LIST:
            creator_info_res: Dict = await self.wb_client.get_creator_info_by_id(creator_id=user_id)
            if creator_info_res:
                creator_info: Dict = creator_info_res.get("userInfo", {})
                utils.logger.info(f"[WeiboCrawler.get_creators_and_notes] creator info: {creator_info}")
                if not creator_info:
                    raise DataFetchError("Get creator info error")
                await weibo_store.save_creator(user_id, user_info=creator_info)

                # Get all note information of the creator
                all_notes_list = await self.wb_client.get_all_notes_by_creator_id(
                    creator_id=user_id,
                    container_id=f"107603{user_id}",
                    crawl_interval=0,
                    callback=weibo_store.batch_update_weibo_notes,
                )

                note_ids = [note_item.get("mblog", {}).get("id") for note_item in all_notes_list if note_item.get("mblog", {}).get("id")]
                await self.batch_get_notes_comments(note_ids)
            else:
                utils.logger.error(f"[WeiboCrawler.get_creators_and_notes] get creator info error, creator_id: {user_id}")
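
    # Note on container_id above: on the m.weibo.cn API, the "107603" prefix
    # followed by the uid selects the container holding that user's weibo feed
    # (an observed convention of the mobile API rather than a documented
    # contract); e.g. a hypothetical uid "1234567890" maps to container_id
    # "1076031234567890".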

    async def create_weibo_client(self, httpx_proxy: Optional[str]) -> WeiboClient:
        """Create weibo API client"""
        utils.logger.info("[WeiboCrawler.create_weibo_client] Begin create weibo API client ...")
        cookie_str, cookie_dict = utils.convert_cookies(await self.browser_context.cookies(urls=[self.mobile_index_url]))
        weibo_client_obj = WeiboClient(
            proxy=httpx_proxy,
            headers={
                "User-Agent": utils.get_mobile_user_agent(),
                "Cookie": cookie_str,
                "Origin": "https://m.weibo.cn",
                "Referer": "https://m.weibo.cn",
                "Content-Type": "application/json;charset=UTF-8",
            },
            playwright_page=self.context_page,
            cookie_dict=cookie_dict,
        )
        return weibo_client_obj

    async def launch_browser(
        self,
        chromium: BrowserType,
        playwright_proxy: Optional[Dict],
        user_agent: Optional[str],
        headless: bool = True,
    ) -> BrowserContext:
        """Launch browser and create browser context"""
        utils.logger.info("[WeiboCrawler.launch_browser] Begin create browser context ...")
        if config.SAVE_LOGIN_STATE:
            user_data_dir = os.path.join(os.getcwd(), "browser_data", config.USER_DATA_DIR % config.PLATFORM)  # type: ignore
            browser_context = await chromium.launch_persistent_context(
                user_data_dir=user_data_dir,
                accept_downloads=True,
                headless=headless,
                proxy=playwright_proxy,  # type: ignore
                viewport={
                    "width": 1920,
                    "height": 1080
                },
                user_agent=user_agent,
                channel="chrome",  # use the system's stable Chrome build
            )
            return browser_context
        else:
            browser = await chromium.launch(headless=headless, proxy=playwright_proxy, channel="chrome")  # type: ignore
            browser_context = await browser.new_context(viewport={"width": 1920, "height": 1080}, user_agent=user_agent)
            return browser_context

    async def launch_browser_with_cdp(
        self,
        playwright: Playwright,
        playwright_proxy: Optional[Dict],
        user_agent: Optional[str],
        headless: bool = True,
    ) -> BrowserContext:
        """
        Launch the browser in CDP (Chrome DevTools Protocol) mode
        """
        try:
            self.cdp_manager = CDPBrowserManager()
            browser_context = await self.cdp_manager.launch_and_connect(
                playwright=playwright,
                playwright_proxy=playwright_proxy,
                user_agent=user_agent,
                headless=headless,
            )

            # Log browser information
            browser_info = await self.cdp_manager.get_browser_info()
            utils.logger.info(f"[WeiboCrawler] CDP browser info: {browser_info}")

            return browser_context
        except Exception as e:
            utils.logger.error(f"[WeiboCrawler] CDP mode failed to start, falling back to standard mode: {e}")
            # Fall back to standard mode
            chromium = playwright.chromium
            return await self.launch_browser(chromium, playwright_proxy, user_agent, headless)

    async def close(self):
        """Close browser context"""
        # CDP mode needs its own cleanup path
        if self.cdp_manager:
            await self.cdp_manager.cleanup()
            self.cdp_manager = None
        else:
            await self.browser_context.close()
        utils.logger.info("[WeiboCrawler.close] Browser context closed ...")
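
# A minimal usage sketch (an illustration only; in this repo the crawler is
# normally constructed and driven by the project's entry point, not run from
# this module):
#
#   import asyncio
#
#   async def run():
#       crawler = WeiboCrawler()
#       try:
#           await crawler.start()
#       finally:
#           await crawler.close()
#
#   asyncio.run(run())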