Compare commits

...

11 Commits

Author SHA1 Message Date
Xinrea 6d9cd3c6a8 fix: danmu reconnection (#167) 2025-08-20 23:08:41 +08:00
Xinrea 303b2f7036 fix: record breaks after stream expired (#166) 2025-08-20 22:55:06 +08:00
Xinrea ec25c2ffd9 bump version to 2.11.6 2025-08-20 22:23:25 +08:00
Xinrea 50ab608ddb fix: cache/output dir migration (close #159) (#165) 2025-08-20 22:15:46 +08:00
  * fix: cache/output dir migration (close #159)
  * chore: adjust wrong log info
  * fix: more accurate way to check path
Xinrea 3c76be9b81 feat: add batch delete for archives API and tool 2025-08-19 00:27:59 +08:00
Xinrea ab7f0cf0b4 bump version to 2.11.5 2025-08-15 22:50:38 +08:00
Xinrea f9f590c4dc fix: docker start with nscd 2025-08-15 22:47:52 +08:00
Xinrea 8d38fe582a fix: ffprobe segment falt in docker environment 2025-08-15 22:31:10 +08:00
Xinrea dc4a26561d bump version to 2.11.4 2025-08-14 22:08:44 +08:00
Xinrea 10c1d1f3a8 feat: add video export button in clip list (close #156) 2025-08-14 22:05:11 +08:00
Xinrea 66bcf53d01 fix: database operation optimization (close #157) 2025-08-14 21:52:08 +08:00
17 changed files with 585 additions and 157 deletions

View File

@@ -65,9 +65,16 @@ RUN apt-get update && apt-get install -y \
libssl3 \
ca-certificates \
fonts-wqy-microhei \
netbase \
nscd \
&& update-ca-certificates \
&& rm -rf /var/lib/apt/lists/*
RUN touch /etc/netgroup
RUN mkdir -p /var/run/nscd && chmod 755 /var/run/nscd
RUN nscd
# Add /app to PATH
ENV PATH="/app:${PATH}"
@@ -83,4 +90,4 @@ COPY --from=rust-builder /app/src-tauri/ffprobe ./ffprobe
EXPOSE 3000
# Run the application
CMD ["./bili-shadowreplay"]
CMD ["sh", "-c", "nscd && ./bili-shadowreplay"]

View File

@@ -1,7 +1,7 @@
{
"name": "bili-shadowreplay",
"private": true,
"version": "2.11.3",
"version": "2.11.6",
"type": "module",
"scripts": {
"dev": "vite",

src-tauri/Cargo.lock (generated)
View File

@@ -537,7 +537,7 @@ checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba"
[[package]]
name = "bili-shadowreplay"
version = "2.11.3"
version = "2.11.6"
dependencies = [
"async-ffmpeg-sidecar",
"async-std",

View File

@@ -4,7 +4,7 @@ resolver = "2"
[package]
name = "bili-shadowreplay"
version = "2.11.3"
version = "2.11.6"
description = "BiliBili ShadowReplay"
authors = ["Xinrea"]
license = ""

View File

@@ -65,7 +65,6 @@ impl DanmuProvider for BiliDanmu {
tx: mpsc::UnboundedSender<DanmuMessageType>,
) -> Result<(), DanmuStreamError> {
let mut retry_count = 0;
const MAX_RETRIES: u32 = 5;
const RETRY_DELAY: Duration = Duration::from_secs(5);
info!(
"Bilibili WebSocket connection started, room_id: {}",
@@ -74,33 +73,37 @@ impl DanmuProvider for BiliDanmu {
loop {
if *self.stop.read().await {
info!(
"Bilibili WebSocket connection stopped, room_id: {}",
self.room_id
);
break;
}
match self.connect_and_handle(tx.clone()).await {
Ok(_) => {
info!("Bilibili WebSocket connection closed normally");
info!(
"Bilibili WebSocket connection closed normally, room_id: {}",
self.room_id
);
break;
}
Err(e) => {
error!("Bilibili WebSocket connection error: {}", e);
retry_count += 1;
if retry_count >= MAX_RETRIES {
return Err(DanmuStreamError::WebsocketError {
err: format!("Failed to connect after {} retries", MAX_RETRIES),
});
}
info!(
"Retrying connection in {} seconds... (Attempt {}/{})",
RETRY_DELAY.as_secs(),
retry_count,
MAX_RETRIES
error!(
"Bilibili WebSocket connection error, room_id: {}, error: {}",
self.room_id, e
);
tokio::time::sleep(RETRY_DELAY).await;
retry_count += 1;
}
}
info!(
"Retrying connection in {} seconds... (Attempt {}), room_id: {}",
RETRY_DELAY.as_secs(),
retry_count,
self.room_id
);
tokio::time::sleep(RETRY_DELAY).await;
}
Ok(())
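
The net effect of this hunk is that the danmu WebSocket no longer gives up after MAX_RETRIES: it keeps retrying at a fixed 5-second interval until the stop flag is set. A condensed sketch of the resulting loop, with names taken from the diff (connect_and_handle, self.stop, self.room_id and tx are assumed from the surrounding impl):

let mut retry_count = 0u32;
const RETRY_DELAY: Duration = Duration::from_secs(5);
loop {
    if *self.stop.read().await {
        break; // stop requested by the recorder
    }
    match self.connect_and_handle(tx.clone()).await {
        Ok(_) => break, // connection closed normally
        Err(e) => {
            error!("connection error, room_id: {}, error: {}", self.room_id, e);
            retry_count += 1;
        }
    }
    info!("retrying in {}s (attempt {})", RETRY_DELAY.as_secs(), retry_count);
    tokio::time::sleep(RETRY_DELAY).await;
}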

View File

@@ -18,14 +18,21 @@ pub struct RecordRow {
// CREATE TABLE records (live_id INTEGER PRIMARY KEY, room_id INTEGER, title TEXT, length INTEGER, size INTEGER, created_at TEXT);
impl Database {
pub async fn get_records(&self, room_id: u64) -> Result<Vec<RecordRow>, DatabaseError> {
pub async fn get_records(
&self,
room_id: u64,
offset: u64,
limit: u64,
) -> Result<Vec<RecordRow>, DatabaseError> {
let lock = self.db.read().await.clone().unwrap();
Ok(
sqlx::query_as::<_, RecordRow>("SELECT * FROM records WHERE room_id = $1")
.bind(room_id as i64)
.fetch_all(&lock)
.await?,
Ok(sqlx::query_as::<_, RecordRow>(
"SELECT * FROM records WHERE room_id = $1 ORDER BY created_at DESC LIMIT $2 OFFSET $3",
)
.bind(room_id as i64)
.bind(limit as i64)
.bind(offset as i64)
.fetch_all(&lock)
.await?)
}
pub async fn get_record(
@@ -35,10 +42,10 @@ impl Database {
) -> Result<RecordRow, DatabaseError> {
let lock = self.db.read().await.clone().unwrap();
Ok(sqlx::query_as::<_, RecordRow>(
"SELECT * FROM records WHERE live_id = $1 and room_id = $2",
"SELECT * FROM records WHERE room_id = $1 and live_id = $2",
)
.bind(live_id)
.bind(room_id as i64)
.bind(live_id)
.fetch_one(&lock)
.await?)
}
@@ -147,4 +154,12 @@ impl Database {
.await?)
}
}
pub async fn get_record_disk_usage(&self) -> Result<u64, DatabaseError> {
let lock = self.db.read().await.clone().unwrap();
let result: (i64,) = sqlx::query_as("SELECT SUM(size) FROM records;")
.fetch_one(&lock)
.await?;
Ok(result.0 as u64)
}
}
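
get_records now takes offset and limit and orders by created_at DESC, so callers page through a room's history instead of loading every row at once. A minimal sketch of paging with the new signature (db, room_id and the page size of 20 are assumptions for illustration):

let page_size: u64 = 20;
let mut offset: u64 = 0;
loop {
    let rows = db.get_records(room_id, offset, page_size).await?;
    if rows.is_empty() {
        break; // no more records
    }
    // ... render or process this page ...
    offset += rows.len() as u64;
}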

View File

@@ -14,10 +14,27 @@ pub async fn get_config(state: state_type!()) -> Result<Config, ()> {
#[allow(dead_code)]
pub async fn set_cache_path(state: state_type!(), cache_path: String) -> Result<(), String> {
let old_cache_path = state.config.read().await.cache.clone();
log::info!(
"Try to set cache path: {} -> {}",
old_cache_path,
cache_path
);
if old_cache_path == cache_path {
return Ok(());
}
let old_cache_path_obj = std::path::Path::new(&old_cache_path);
let new_cache_path_obj = std::path::Path::new(&cache_path);
// check if new cache path is under old cache path
if new_cache_path_obj.starts_with(old_cache_path_obj) {
log::error!(
"New cache path is under old cache path: {} -> {}",
old_cache_path,
cache_path
);
return Err("New cache path cannot be under old cache path".to_string());
}
state.recorder_manager.set_migrating(true).await;
// stop and clear all recorders
state.recorder_manager.stop_all().await;
@@ -52,9 +69,11 @@ pub async fn set_cache_path(state: state_type!(), cache_path: String) -> Result<
if entry.is_dir() {
if let Err(e) = crate::handlers::utils::copy_dir_all(entry, &new_entry) {
log::error!("Copy old cache to new cache error: {}", e);
return Err(e.to_string());
}
} else if let Err(e) = std::fs::copy(entry, &new_entry) {
log::error!("Copy old cache to new cache error: {}", e);
return Err(e.to_string());
}
}
@@ -79,12 +98,30 @@ pub async fn set_cache_path(state: state_type!(), cache_path: String) -> Result<
#[cfg_attr(feature = "gui", tauri::command)]
#[allow(dead_code)]
pub async fn set_output_path(state: state_type!(), output_path: String) -> Result<(), ()> {
pub async fn set_output_path(state: state_type!(), output_path: String) -> Result<(), String> {
let mut config = state.config.write().await;
let old_output_path = config.output.clone();
log::info!(
"Try to set output path: {} -> {}",
old_output_path,
output_path
);
if old_output_path == output_path {
return Ok(());
}
let old_output_path_obj = std::path::Path::new(&old_output_path);
let new_output_path_obj = std::path::Path::new(&output_path);
// check if new output path is under old output path
if new_output_path_obj.starts_with(old_output_path_obj) {
log::error!(
"New output path is under old output path: {} -> {}",
old_output_path,
output_path
);
return Err("New output path cannot be under old output path".to_string());
}
// list all file and folder in old output
let mut old_output_entries = vec![];
if let Ok(entries) = std::fs::read_dir(&old_output_path) {
@@ -103,10 +140,12 @@ pub async fn set_output_path(state: state_type!(), output_path: String) -> Resul
// if entry is a folder
if entry.is_dir() {
if let Err(e) = crate::handlers::utils::copy_dir_all(entry, &new_entry) {
log::error!("Copy old cache to new cache error: {}", e);
log::error!("Copy old output to new output error: {}", e);
return Err(e.to_string());
}
} else if let Err(e) = std::fs::copy(entry, &new_entry) {
log::error!("Copy old cache to new cache error: {}", e);
log::error!("Copy old output to new output error: {}", e);
return Err(e.to_string());
}
}
@@ -114,10 +153,10 @@ pub async fn set_output_path(state: state_type!(), output_path: String) -> Resul
for entry in old_output_entries {
if entry.is_dir() {
if let Err(e) = std::fs::remove_dir_all(&entry) {
log::error!("Remove old cache error: {}", e);
log::error!("Remove old output error: {}", e);
}
} else if let Err(e) = std::fs::remove_file(&entry) {
log::error!("Remove old cache error: {}", e);
log::error!("Remove old output error: {}", e);
}
}
@@ -251,4 +290,4 @@ pub async fn update_user_agent(state: state_type!(), user_agent: String) -> Resu
log::info!("Updating user agent to {}", user_agent);
state.config.write().await.set_user_agent(&user_agent);
Ok(())
}
}
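
The new guard rejects a target directory nested inside the current one before any files are copied, since copying the old tree into one of its own subdirectories would recurse into itself. Path::starts_with compares paths lexically, component by component, which is enough for this check; a small illustration with hypothetical paths:

use std::path::Path;

// nested inside the old path: rejected by the guard
assert!(Path::new("/data/cache/sub").starts_with("/data/cache"));
// sibling that merely shares a string prefix: allowed, components differ
assert!(!Path::new("/data/cache-new").starts_with("/data/cache"));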

View File

@@ -121,8 +121,21 @@ pub async fn get_room_info(
}
#[cfg_attr(feature = "gui", tauri::command)]
pub async fn get_archives(state: state_type!(), room_id: u64) -> Result<Vec<RecordRow>, String> {
Ok(state.recorder_manager.get_archives(room_id).await?)
pub async fn get_archive_disk_usage(state: state_type!()) -> Result<u64, String> {
Ok(state.recorder_manager.get_archive_disk_usage().await?)
}
#[cfg_attr(feature = "gui", tauri::command)]
pub async fn get_archives(
state: state_type!(),
room_id: u64,
offset: u64,
limit: u64,
) -> Result<Vec<RecordRow>, String> {
Ok(state
.recorder_manager
.get_archives(room_id, offset, limit)
.await?)
}
#[cfg_attr(feature = "gui", tauri::command)]
@@ -196,6 +209,35 @@ pub async fn delete_archive(
Ok(())
}
#[cfg_attr(feature = "gui", tauri::command)]
pub async fn delete_archives(
state: state_type!(),
platform: String,
room_id: u64,
live_ids: Vec<String>,
) -> Result<(), String> {
let platform = PlatformType::from_str(&platform);
if platform.is_none() {
return Err("Unsupported platform".to_string());
}
state
.recorder_manager
.delete_archives(
platform.unwrap(),
room_id,
&live_ids.iter().map(|s| s.as_str()).collect::<Vec<&str>>(),
)
.await?;
state
.db
.new_message(
"删除历史缓存",
&format!("删除了房间 {} 的历史缓存 {}", room_id, live_ids.join(", ")),
)
.await?;
Ok(())
}
#[cfg_attr(feature = "gui", tauri::command)]
pub async fn get_danmu_record(
state: state_type!(),

View File

@@ -22,10 +22,11 @@ use crate::{
},
message::{delete_message, get_messages, read_message},
recorder::{
add_recorder, delete_archive, export_danmu, fetch_hls, generate_archive_subtitle,
get_archive, get_archive_subtitle, get_archives, get_danmu_record, get_recent_record,
get_recorder_list, get_room_info, get_today_record_count, get_total_length,
remove_recorder, send_danmaku, set_enable, ExportDanmuOptions,
add_recorder, delete_archive, delete_archives, export_danmu, fetch_hls,
generate_archive_subtitle, get_archive, get_archive_disk_usage, get_archive_subtitle,
get_archives, get_danmu_record, get_recent_record, get_recorder_list, get_room_info,
get_today_record_count, get_total_length, remove_recorder, send_danmaku, set_enable,
ExportDanmuOptions,
},
task::{delete_task, get_tasks},
utils::{console_log, get_disk_info, list_folder, DiskInfo},
@@ -495,17 +496,26 @@ async fn handler_get_room_info(
Ok(Json(ApiResponse::success(room_info)))
}
async fn handler_get_archive_disk_usage(
state: axum::extract::State<State>,
) -> Result<Json<ApiResponse<u64>>, ApiError> {
let disk_usage = get_archive_disk_usage(state.0).await?;
Ok(Json(ApiResponse::success(disk_usage)))
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct GetArchivesRequest {
room_id: u64,
offset: u64,
limit: u64,
}
async fn handler_get_archives(
state: axum::extract::State<State>,
Json(param): Json<GetArchivesRequest>,
) -> Result<Json<ApiResponse<Vec<RecordRow>>>, ApiError> {
let archives = get_archives(state.0, param.room_id).await?;
let archives = get_archives(state.0, param.room_id, param.offset, param.limit).await?;
Ok(Json(ApiResponse::success(archives)))
}
@@ -574,6 +584,22 @@ async fn handler_delete_archive(
Ok(Json(ApiResponse::success(())))
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct DeleteArchivesRequest {
platform: String,
room_id: u64,
live_ids: Vec<String>,
}
async fn handler_delete_archives(
state: axum::extract::State<State>,
Json(param): Json<DeleteArchivesRequest>,
) -> Result<Json<ApiResponse<()>>, ApiError> {
delete_archives(state.0, param.platform, param.room_id, param.live_ids).await?;
Ok(Json(ApiResponse::success(())))
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct GetDanmuRecordRequest {
@@ -1507,6 +1533,7 @@ pub async fn start_api_server(state: State) {
.route("/api/add_recorder", post(handler_add_recorder))
.route("/api/remove_recorder", post(handler_remove_recorder))
.route("/api/delete_archive", post(handler_delete_archive))
.route("/api/delete_archives", post(handler_delete_archives))
.route("/api/send_danmaku", post(handler_send_danmaku))
.route("/api/set_enable", post(handler_set_enable))
.route("/api/upload_procedure", post(handler_upload_procedure))
@@ -1584,6 +1611,10 @@ pub async fn start_api_server(state: State) {
.route("/api/get_room_info", post(handler_get_room_info))
.route("/api/get_archives", post(handler_get_archives))
.route("/api/get_archive", post(handler_get_archive))
.route(
"/api/get_archive_disk_usage",
post(handler_get_archive_disk_usage),
)
.route(
"/api/get_archive_subtitle",
post(handler_get_archive_subtitle),
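
For headless deployments the same changes are exposed over the HTTP API: get_archives now accepts offset/limit, and delete_archives and get_archive_disk_usage are new routes. Because the request structs use #[serde(rename_all = "camelCase")], the JSON fields are roomId, liveIds, and so on. A hedged sketch of calling the two archive endpoints with reqwest (base URL, room id and live ids are placeholders; reqwest with its json feature and serde_json are assumed dependencies):

let client = reqwest::Client::new();

// page through a room's archives
let archives: serde_json::Value = client
    .post("http://127.0.0.1:3000/api/get_archives")
    .json(&serde_json::json!({ "roomId": 123456, "offset": 0, "limit": 20 }))
    .send()
    .await?
    .json()
    .await?;

// delete several archives in one call
client
    .post("http://127.0.0.1:3000/api/delete_archives")
    .json(&serde_json::json!({
        "platform": "bilibili",
        "roomId": 123456,
        "liveIds": ["live-id-1", "live-id-2"]
    }))
    .send()
    .await?;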

View File

@@ -172,6 +172,18 @@ fn get_migrations() -> Vec<Migration> {
sql: r#"ALTER TABLE recorders ADD COLUMN extra TEXT;"#,
kind: MigrationKind::Up,
},
// add indexes
Migration {
version: 7,
description: "add_indexes",
sql: r#"
CREATE INDEX idx_records_live_id ON records (room_id, live_id);
CREATE INDEX idx_records_created_at ON records (room_id, created_at);
CREATE INDEX idx_videos_room_id ON videos (room_id);
CREATE INDEX idx_videos_created_at ON videos (created_at);
"#,
kind: MigrationKind::Up,
},
]
}
@@ -519,11 +531,13 @@ fn setup_invoke_handlers(builder: tauri::Builder<tauri::Wry>) -> tauri::Builder<
crate::handlers::recorder::add_recorder,
crate::handlers::recorder::remove_recorder,
crate::handlers::recorder::get_room_info,
crate::handlers::recorder::get_archive_disk_usage,
crate::handlers::recorder::get_archives,
crate::handlers::recorder::get_archive,
crate::handlers::recorder::get_archive_subtitle,
crate::handlers::recorder::generate_archive_subtitle,
crate::handlers::recorder::delete_archive,
crate::handlers::recorder::delete_archives,
crate::handlers::recorder::get_danmu_record,
crate::handlers::recorder::export_danmu,
crate::handlers::recorder::send_danmaku,

View File

@@ -241,13 +241,14 @@ impl BiliRecorder {
});
}
// just doing reset
// just doing reset, cuz live status is changed
self.reset().await;
}
*self.live_status.write().await = live_status;
if !live_status {
// reset cuz live is ended
self.reset().await;
return false;
@@ -387,7 +388,7 @@ impl BiliRecorder {
let danmu_stream = DanmuStream::new(ProviderType::BiliBili, &cookies, room_id).await;
if danmu_stream.is_err() {
let err = danmu_stream.err().unwrap();
log::error!("Failed to create danmu stream: {}", err);
log::error!("[{}]Failed to create danmu stream: {}", self.room_id, err);
return Err(super::errors::RecorderError::DanmuStreamError { err });
}
let danmu_stream = danmu_stream.unwrap();
@@ -414,7 +415,7 @@ impl BiliRecorder {
}
}
} else {
log::error!("Failed to receive danmu message");
log::error!("[{}]Failed to receive danmu message", self.room_id);
return Err(super::errors::RecorderError::DanmuStreamError {
err: danmu_stream::DanmuStreamError::WebsocketError {
err: "Failed to receive danmu message".to_string(),
@@ -453,9 +454,14 @@ impl BiliRecorder {
})
}
Err(e) => {
log::error!("Failed fetching index content from {}", stream.index());
log::error!(
"Master manifest: {}",
"[{}]Failed fetching index content from {}",
self.room_id,
stream.index()
);
log::error!(
"[{}]Master manifest: {}",
self.room_id,
self.master_manifest.read().await.as_ref().unwrap()
);
Err(super::errors::RecorderError::BiliClientError { err: e })
@@ -491,7 +497,11 @@ impl BiliRecorder {
header_url = captures.get(0).unwrap().as_str().to_string();
}
if header_url.is_empty() {
log::warn!("Parse header url failed: {}", index_content);
log::warn!(
"[{}]Parse header url failed: {}",
self.room_id,
index_content
);
}
Ok(header_url)
@@ -627,11 +637,16 @@ impl BiliRecorder {
{
Ok(size) => {
if size == 0 {
log::error!("Download header failed: {}", full_header_url);
log::error!(
"[{}]Download header failed: {}",
self.room_id,
full_header_url
);
// Clean up empty directory since header download failed
if let Err(cleanup_err) = tokio::fs::remove_dir_all(&work_dir).await {
log::warn!(
"Failed to cleanup empty work directory {}: {}",
"[{}]Failed to cleanup empty work directory {}: {}",
self.room_id,
work_dir,
cleanup_err
);
@@ -684,11 +699,12 @@ impl BiliRecorder {
});
}
Err(e) => {
log::error!("Download header failed: {}", e);
log::error!("[{}]Download header failed: {}", self.room_id, e);
// Clean up empty directory since header download failed
if let Err(cleanup_err) = tokio::fs::remove_dir_all(&work_dir).await {
log::warn!(
"Failed to cleanup empty work directory {}: {}",
"[{}]Failed to cleanup empty work directory {}: {}",
self.room_id,
work_dir,
cleanup_err
);
@@ -735,7 +751,9 @@ impl BiliRecorder {
}
match playlist {
Playlist::MasterPlaylist(pl) => log::debug!("Master playlist:\n{:?}", pl),
Playlist::MasterPlaylist(pl) => {
log::debug!("[{}]Master playlist:\n{:?}", self.room_id, pl)
}
Playlist::MediaPlaylist(pl) => {
let mut new_segment_fetched = false;
let last_sequence = self
@@ -781,7 +799,12 @@ impl BiliRecorder {
let ts_url = current_stream.ts_url(&ts.uri);
if Url::parse(&ts_url).is_err() {
log::error!("Ts url is invalid. ts_url={} original={}", ts_url, ts.uri);
log::error!(
"[{}]Ts url is invalid. ts_url={} original={}",
self.room_id,
ts_url,
ts.uri
);
continue;
}
@@ -845,7 +868,7 @@ impl BiliRecorder {
loop {
if retry > 3 {
log::error!("Download ts failed after retry");
log::error!("[{}]Download ts failed after retry", self.room_id);
// Clean up empty directory if first ts download failed for non-FMP4
if is_first_record
@@ -855,7 +878,8 @@ impl BiliRecorder {
if let Err(cleanup_err) = tokio::fs::remove_dir_all(&work_dir).await
{
log::warn!(
"Failed to cleanup empty work directory {}: {}",
"[{}]Failed to cleanup empty work directory {}: {}",
self.room_id,
work_dir,
cleanup_err
);
@@ -872,7 +896,10 @@ impl BiliRecorder {
{
Ok(size) => {
if size == 0 {
log::error!("Segment with size 0, stream might be corrupted");
log::error!(
"[{}]Segment with size 0, stream might be corrupted",
self.room_id
);
// Clean up empty directory if first ts download failed for non-FMP4
if is_first_record
@@ -883,7 +910,8 @@ impl BiliRecorder {
tokio::fs::remove_dir_all(&work_dir).await
{
log::warn!(
"Failed to cleanup empty work directory {}: {}",
"[{}]Failed to cleanup empty work directory {}: {}",
self.room_id,
work_dir,
cleanup_err
);
@@ -934,20 +962,30 @@ impl BiliRecorder {
{
Ok(duration) => {
log::debug!(
"Precise TS segment duration: {}s (original: {}s)",
"[{}]Precise TS segment duration: {}s (original: {}s)",
self.room_id,
duration,
ts_length
);
duration
}
Err(e) => {
log::warn!("Failed to get precise TS duration for {}: {}, using fallback", file_name, e);
log::warn!(
"[{}]Failed to get precise TS duration for {}: {}, using fallback",
self.room_id,
file_name,
e
);
ts_length
}
}
} else {
// FMP4 segment without BILI-AUX info, use fallback
log::debug!("No BILI-AUX data available for FMP4 segment {}, using target duration", file_name);
log::debug!(
"[{}]No BILI-AUX data available for FMP4 segment {}, using target duration",
self.room_id,
file_name
);
ts_length
};
@@ -978,7 +1016,12 @@ impl BiliRecorder {
}
Err(e) => {
retry += 1;
log::warn!("Download ts failed, retry {}: {}", retry, e);
log::warn!(
"[{}]Download ts failed, retry {}: {}",
self.room_id,
retry,
e
);
// If this is the last retry and it's the first record for non-FMP4, clean up
if retry > 3
@@ -990,7 +1033,8 @@ impl BiliRecorder {
tokio::fs::remove_dir_all(&work_dir).await
{
log::warn!(
"Failed to cleanup empty work directory {}: {}",
"[{}]Failed to cleanup empty work directory {}: {}",
self.room_id,
work_dir,
cleanup_err
);
@@ -1024,7 +1068,10 @@ impl BiliRecorder {
} else {
// if index content is not changed for a long time, we should return a error to fetch a new stream
if *self.last_update.read().await < Utc::now().timestamp() - 10 {
log::error!("Stream content is not updating for 10s, maybe not started yet or not closed properly.");
log::error!(
"[{}]Stream content is not updating for 10s, maybe not started yet or not closed properly.",
self.room_id
);
return Err(super::errors::RecorderError::FreezedStream {
stream: current_stream,
});
@@ -1034,7 +1081,11 @@ impl BiliRecorder {
if let Some(entry_store) = self.entry_store.read().await.as_ref() {
if let Some(last_ts) = entry_store.last_ts() {
if last_ts < Utc::now().timestamp() - 10 {
log::error!("Stream is too slow, last entry ts is at {}", last_ts);
log::error!(
"[{}]Stream is too slow, last entry ts is at {}",
self.room_id,
last_ts
);
return Err(super::errors::RecorderError::SlowStream {
stream: current_stream,
});
@@ -1054,7 +1105,7 @@ impl BiliRecorder {
.as_ref()
.is_some_and(|s| s.expire - Utc::now().timestamp() < pre_offset as i64)
{
log::info!("Stream is nearly expired");
log::info!("[{}]Stream is nearly expired", self.room_id);
return Err(super::errors::RecorderError::StreamExpired {
stream: current_stream.unwrap(),
});
@@ -1103,17 +1154,18 @@ impl super::Recorder for BiliRecorder {
async fn run(&self) {
let self_clone = self.clone();
*self.danmu_task.lock().await = Some(tokio::spawn(async move {
log::info!("Start fetching danmu for room {}", self_clone.room_id);
log::info!("[{}]Start fetching danmu", self_clone.room_id);
let _ = self_clone.danmu().await;
}));
let self_clone = self.clone();
*self.record_task.lock().await = Some(tokio::spawn(async move {
log::info!("Start running recorder for room {}", self_clone.room_id);
log::info!("[{}]Start running recorder", self_clone.room_id);
while !*self_clone.quit.lock().await {
let mut connection_fail_count = 0;
if self_clone.check_status().await {
// Live status is ok, start recording.
let mut continue_record = false;
while self_clone.should_record().await {
match self_clone.update_entries().await {
Ok(ms) => {
@@ -1137,11 +1189,20 @@ impl super::Recorder for BiliRecorder {
connection_fail_count =
std::cmp::min(5, connection_fail_count + 1);
}
// if error is stream expired, we should not break, cuz we need to fetch a new stream
if let RecorderError::StreamExpired { stream: _ } = e {
continue_record = true;
}
break;
}
}
}
if continue_record {
log::info!("[{}]Continue recording without reset", self_clone.room_id);
continue;
}
// whatever error happened during update entries, reset to start another recording.
*self_clone.is_recording.write().await = false;
self_clone.reset().await;
@@ -1161,7 +1222,7 @@ impl super::Recorder for BiliRecorder {
}
async fn stop(&self) {
log::debug!("Stop recorder for room {}", self.room_id);
log::debug!("[{}]Stop recorder", self.room_id);
*self.quit.lock().await = true;
if let Some(danmu_task) = self.danmu_task.lock().await.as_mut() {
let _ = danmu_task.abort();
@@ -1169,7 +1230,7 @@ impl super::Recorder for BiliRecorder {
if let Some(record_task) = self.record_task.lock().await.as_mut() {
let _ = record_task.abort();
}
log::info!("Recorder for room {} quit.", self.room_id);
log::info!("[{}]Recorder quit.", self.room_id);
}
/// timestamp is the id of live stream
@@ -1182,7 +1243,10 @@ impl super::Recorder for BiliRecorder {
}
async fn master_m3u8(&self, live_id: &str, start: i64, end: i64) -> String {
log::info!("Master manifest for {live_id} {start}-{end}");
log::info!(
"[{}]Master manifest for {live_id} {start}-{end}",
self.room_id
);
let offset = self.first_segment_ts(live_id).await / 1000;
let mut m3u8_content = "#EXTM3U\n".to_string();
m3u8_content += "#EXT-X-VERSION:6\n";
@@ -1259,7 +1323,11 @@ impl super::Recorder for BiliRecorder {
live_id,
"danmu.txt"
);
log::debug!("loading danmu cache from {}", cache_file_path);
log::debug!(
"[{}]loading danmu cache from {}",
self.room_id,
cache_file_path
);
let storage = DanmuStorage::new(&cache_file_path).await;
if storage.is_none() {
return Ok(Vec::new());
@@ -1308,7 +1376,11 @@ impl super::Recorder for BiliRecorder {
let m3u8_index_file_path = format!("{}/{}", work_dir, "tmp.m3u8");
let m3u8_content = self.m3u8_content(live_id, 0, 0).await;
tokio::fs::write(&m3u8_index_file_path, m3u8_content).await?;
log::info!("M3U8 index file generated: {}", m3u8_index_file_path);
log::info!(
"[{}]M3U8 index file generated: {}",
self.room_id,
m3u8_index_file_path
);
// generate a tmp clip file
let clip_file_path = format!("{}/{}", work_dir, "tmp.mp4");
if let Err(e) = crate::ffmpeg::clip_from_m3u8(
@@ -1324,7 +1396,11 @@ impl super::Recorder for BiliRecorder {
error: e.to_string(),
});
}
log::info!("Temp clip file generated: {}", clip_file_path);
log::info!(
"[{}]Temp clip file generated: {}",
self.room_id,
clip_file_path
);
// generate subtitle file
let config = self.config.read().await;
let result = crate::ffmpeg::generate_video_subtitle(
@@ -1344,7 +1420,7 @@ impl super::Recorder for BiliRecorder {
error: e.to_string(),
});
}
log::info!("Subtitle generated");
log::info!("[{}]Subtitle generated", self.room_id);
let result = result.unwrap();
let subtitle_content = result
.subtitle_content
@@ -1353,11 +1429,11 @@ impl super::Recorder for BiliRecorder {
.collect::<Vec<String>>()
.join("");
subtitle_file.write_all(subtitle_content.as_bytes()).await?;
log::info!("Subtitle file written");
log::info!("[{}]Subtitle file written", self.room_id);
// remove tmp file
tokio::fs::remove_file(&m3u8_index_file_path).await?;
tokio::fs::remove_file(&clip_file_path).await?;
log::info!("Tmp file removed");
log::info!("[{}]Tmp file removed", self.room_id);
Ok(subtitle_content)
}
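
Besides prefixing every log line with the room id, the key behavioral change in this file is the continue_record flag: when update_entries fails with RecorderError::StreamExpired, the recording loop now skips the reset and goes straight back to check_status to fetch a fresh stream, so the session keeps recording instead of breaking (the #166 fix). A condensed sketch of the new flow, with names taken from the diff and the surrounding task loop assumed:

while !*self_clone.quit.lock().await {
    if self_clone.check_status().await {
        let mut continue_record = false;
        while self_clone.should_record().await {
            match self_clone.update_entries().await {
                Ok(_) => { /* keep recording */ }
                Err(e) => {
                    if let RecorderError::StreamExpired { .. } = e {
                        continue_record = true; // expired stream: refetch, don't reset
                    }
                    break;
                }
            }
        }
        if continue_record {
            continue; // skip the reset and pick up a new stream right away
        }
        *self_clone.is_recording.write().await = false;
        self_clone.reset().await;
    }
    // ... sleep before the next status check, as in the original loop ...
}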

View File

@@ -613,8 +613,17 @@ impl RecorderManager {
}
}
pub async fn get_archives(&self, room_id: u64) -> Result<Vec<RecordRow>, RecorderManagerError> {
Ok(self.db.get_records(room_id).await?)
pub async fn get_archive_disk_usage(&self) -> Result<u64, RecorderManagerError> {
Ok(self.db.get_record_disk_usage().await?)
}
pub async fn get_archives(
&self,
room_id: u64,
offset: u64,
limit: u64,
) -> Result<Vec<RecordRow>, RecorderManagerError> {
Ok(self.db.get_records(room_id, offset, limit).await?)
}
pub async fn get_archive(
@@ -681,6 +690,19 @@ impl RecorderManager {
Ok(())
}
pub async fn delete_archives(
&self,
platform: PlatformType,
room_id: u64,
live_ids: &[&str],
) -> Result<(), RecorderManagerError> {
log::info!("Deleting archives in batch: {:?}", live_ids);
for live_id in live_ids {
self.delete_archive(platform, room_id, live_id).await?;
}
Ok(())
}
pub async fn get_danmu(
&self,
platform: PlatformType,

View File

@@ -28,7 +28,7 @@ const get_accounts = tool(
name: "get_accounts",
description: "Get all available accounts",
schema: z.object({}),
}
},
);
// @ts-ignore
@@ -47,11 +47,11 @@ const remove_account = tool(
platform: z
.string()
.describe(
`The platform of the account. Can be ${platform_list.join(", ")}`
`The platform of the account. Can be ${platform_list.join(", ")}`,
),
uid: z.number().describe("The uid of the account"),
}),
}
},
);
// @ts-ignore
@@ -79,16 +79,16 @@ const add_recorder = tool(
platform: z
.string()
.describe(
`The platform of the recorder. Can be ${platform_list.join(", ")}`
`The platform of the recorder. Can be ${platform_list.join(", ")}`,
),
room_id: z.number().describe("The room id of the recorder"),
extra: z
.string()
.describe(
"The extra of the recorder, should be empty for bilibili, and the sec_user_id for douyin"
"The extra of the recorder, should be empty for bilibili, and the sec_user_id for douyin",
),
}),
}
},
);
// @ts-ignore
@@ -107,11 +107,11 @@ const remove_recorder = tool(
platform: z
.string()
.describe(
`The platform of the recorder. Can be ${platform_list.join(", ")}`
`The platform of the recorder. Can be ${platform_list.join(", ")}`,
),
room_id: z.number().describe("The room id of the recorder"),
}),
}
},
);
// @ts-ignore
@@ -124,7 +124,7 @@ const get_recorder_list = tool(
name: "get_recorder_list",
description: "Get the list of all available recorders",
schema: z.object({}),
}
},
);
// @ts-ignore
@@ -140,14 +140,24 @@ const get_recorder_info = tool(
platform: z.string().describe("The platform of the room"),
room_id: z.number().describe("The room id of the room"),
}),
}
},
);
// @ts-ignore
const get_archives = tool(
async ({ room_id }: { room_id: number }) => {
async ({
room_id,
offset,
limit,
}: {
room_id: number;
offset: number;
limit: number;
}) => {
const archives = (await invoke("get_archives", {
roomId: room_id,
offset,
limit,
})) as any[];
// hide cover in result
return {
@@ -165,8 +175,10 @@ const get_archives = tool(
description: "Get the list of all archives of a recorder",
schema: z.object({
room_id: z.number().describe("The room id of the recorder"),
offset: z.number().describe("The offset of the archives"),
limit: z.number().describe("The limit of the archives"),
}),
}
},
);
// @ts-ignore
@@ -190,7 +202,7 @@ const get_archive = tool(
room_id: z.number().describe("The room id of the recorder"),
live_id: z.string().describe("The live id of the archive"),
}),
}
},
);
// @ts-ignore
@@ -218,12 +230,45 @@ const delete_archive = tool(
platform: z
.string()
.describe(
`The platform of the recorder. Can be ${platform_list.join(", ")}`
`The platform of the recorder. Can be ${platform_list.join(", ")}`,
),
room_id: z.number().describe("The room id of the recorder"),
live_id: z.string().describe("The live id of the archive"),
}),
}
},
);
// @ts-ignore
const delete_archives = tool(
async ({
platform,
room_id,
live_ids,
}: {
platform: string;
room_id: number;
live_ids: string[];
}) => {
const result = await invoke("delete_archives", {
platform,
roomId: room_id,
liveIds: live_ids,
});
return result;
},
{
name: "delete_archives",
description: "Delete multiple archives",
schema: z.object({
platform: z
.string()
.describe(
`The platform of the recorder. Can be ${platform_list.join(", ")}`,
),
room_id: z.number().describe("The room id of the recorder"),
live_ids: z.array(z.string()).describe("The live ids of the archives"),
}),
},
);
// @ts-ignore
@@ -243,7 +288,7 @@ const get_background_tasks = tool(
name: "get_background_tasks",
description: "Get the list of all background tasks",
schema: z.object({}),
}
},
);
// @ts-ignore
@@ -258,7 +303,7 @@ const delete_background_task = tool(
schema: z.object({
id: z.string().describe("The id of the task"),
}),
}
},
);
// @ts-ignore
@@ -280,7 +325,7 @@ const get_videos = tool(
schema: z.object({
room_id: z.number().describe("The room id of the room"),
}),
}
},
);
// @ts-ignore
@@ -300,7 +345,7 @@ const get_all_videos = tool(
name: "get_all_videos",
description: "Get the list of all videos from all rooms",
schema: z.object({}),
}
},
);
// @ts-ignore
@@ -320,7 +365,7 @@ const get_video = tool(
schema: z.object({
id: z.number().describe("The id of the video"),
}),
}
},
);
// @ts-ignore
@@ -337,7 +382,7 @@ const get_video_cover = tool(
schema: z.object({
id: z.number().describe("The id of the video"),
}),
}
},
);
// @ts-ignore
@@ -352,7 +397,7 @@ const delete_video = tool(
schema: z.object({
id: z.number().describe("The id of the video"),
}),
}
},
);
// @ts-ignore
@@ -366,7 +411,7 @@ const get_video_typelist = tool(
description:
"Get the list of all video types视频分区 that can be selected on bilibili platform",
schema: z.object({}),
}
},
);
// @ts-ignore
@@ -382,7 +427,7 @@ const get_video_subtitle = tool(
schema: z.object({
id: z.number().describe("The id of the video"),
}),
}
},
);
// @ts-ignore
@@ -397,7 +442,7 @@ const generate_video_subtitle = tool(
schema: z.object({
id: z.number().describe("The id of the video"),
}),
}
},
);
// @ts-ignore
@@ -417,10 +462,10 @@ const encode_video_subtitle = tool(
srt_style: z
.string()
.describe(
"The style of the subtitle, it is used for ffmpeg -vf force_style, it must be a valid srt style"
"The style of the subtitle, it is used for ffmpeg -vf force_style, it must be a valid srt style",
),
}),
}
},
);
// @ts-ignore
@@ -474,7 +519,7 @@ const post_video_to_bilibili = tool(
uid: z
.number()
.describe(
"The uid of the user, it should be one of the uid in the bilibili accounts"
"The uid of the user, it should be one of the uid in the bilibili accounts",
),
room_id: z.number().describe("The room id of the room"),
video_id: z.number().describe("The id of the video"),
@@ -483,15 +528,15 @@ const post_video_to_bilibili = tool(
tag: z
.string()
.describe(
"The tag of the video, multiple tags should be separated by comma"
"The tag of the video, multiple tags should be separated by comma",
),
tid: z
.number()
.describe(
"The tid of the video, it is the id of the video type, you can use get_video_typelist to get the list of all video types"
"The tid of the video, it is the id of the video type, you can use get_video_typelist to get the list of all video types",
),
}),
}
},
);
// @ts-ignore
@@ -529,7 +574,7 @@ const get_danmu_record = tool(
room_id: z.number().describe("The room id of the room"),
live_id: z.string().describe("The live id of the live"),
}),
}
},
);
// @ts-ignore
@@ -556,7 +601,7 @@ const clip_range = tool(
reason: z
.string()
.describe(
"The reason for the clip range, it will be shown to the user. You must offer a summary of the clip range content and why you choose this clip range."
"The reason for the clip range, it will be shown to the user. You must offer a summary of the clip range content and why you choose this clip range.",
),
clip_range_params: z.object({
room_id: z.number().describe("The room id of the room"),
@@ -568,12 +613,12 @@ const clip_range = tool(
danmu: z
.boolean()
.describe(
"Whether to encode danmu, encode danmu will take a lot of time, so it is recommended to set it to false"
"Whether to encode danmu, encode danmu will take a lot of time, so it is recommended to set it to false",
),
local_offset: z
.number()
.describe(
"The offset for danmu timestamp, it is used to correct the timestamp of danmu"
"The offset for danmu timestamp, it is used to correct the timestamp of danmu",
),
title: z.string().describe("The title of the clip"),
cover: z.string().describe("Must be empty"),
@@ -581,11 +626,11 @@ const clip_range = tool(
fix_encoding: z
.boolean()
.describe(
"Whether to fix the encoding of the clip, it will take a lot of time, so it is recommended to set it to false"
"Whether to fix the encoding of the clip, it will take a lot of time, so it is recommended to set it to false",
),
}),
}),
}
},
);
// @ts-ignore
@@ -622,7 +667,7 @@ const get_recent_record = tool(
offset: z.number().describe("The offset of the records"),
limit: z.number().describe("The limit of the records"),
}),
}
},
);
// @ts-ignore
@@ -650,7 +695,7 @@ const get_recent_record_all = tool(
offset: z.number().describe("The offset of the records"),
limit: z.number().describe("The limit of the records"),
}),
}
},
);
// @ts-ignore
@@ -665,7 +710,7 @@ const generic_ffmpeg_command = tool(
schema: z.object({
args: z.array(z.string()).describe("The arguments of the ffmpeg command"),
}),
}
},
);
// @ts-ignore
@@ -680,7 +725,7 @@ const open_clip = tool(
schema: z.object({
video_id: z.number().describe("The id of the video"),
}),
}
},
);
// @ts-ignore
@@ -695,7 +740,7 @@ const list_folder = tool(
schema: z.object({
path: z.string().describe("The path of the folder"),
}),
}
},
);
// @ts-ignore
@@ -725,7 +770,7 @@ const get_archive_subtitle = tool(
room_id: z.number().describe("The room id of the archive"),
live_id: z.string().describe("The live id of the archive"),
}),
}
},
);
// @ts-ignore
@@ -755,7 +800,7 @@ const generate_archive_subtitle = tool(
room_id: z.number().describe("The room id of the archive"),
live_id: z.string().describe("The live id of the archive"),
}),
}
},
);
const tools = [

View File

@@ -1,5 +1,10 @@
<script lang="ts">
import { invoke, convertCoverSrc } from "../lib/invoker";
import {
invoke,
convertCoverSrc,
TAURI_ENV,
convertFileSrc,
} from "../lib/invoker";
import type { VideoItem } from "../lib/interface";
import { onMount } from "svelte";
import {
@@ -17,6 +22,7 @@
Home,
FileVideo,
Scissors,
Download,
} from "lucide-svelte";
let videos: VideoItem[] = [];
@@ -268,6 +274,16 @@
}
}
async function exportVideo(video: VideoItem) {
// download video
const video_url = await convertFileSrc(video.file);
const video_name = video.title;
const a = document.createElement("a");
a.href = video_url;
a.download = video_name;
a.click();
}
import ImportVideoDialog from "../lib/ImportVideoDialog.svelte";
</script>
@@ -682,7 +698,15 @@
>
<Play class="table-icon" />
</button>
{#if !TAURI_ENV}
<button
class="p-1.5 text-gray-600 dark:text-gray-400 hover:text-blue-600 dark:hover:text-blue-400 hover:bg-blue-50 dark:hover:bg-blue-900/20 rounded transition-colors"
title="导出"
on:click={async () => await exportVideo(video)}
>
<Download class="table-icon" />
</button>
{/if}
<button
class="p-1.5 text-gray-600 dark:text-gray-400 hover:text-red-600 dark:hover:text-red-400 hover:bg-red-50 dark:hover:bg-red-900/20 rounded transition-colors"
title="删除"

View File

@@ -77,22 +77,83 @@
let archiveRoom = null;
let archives: RecordItem[] = [];
// 分页相关状态
let currentPage = 0;
let pageSize = 20;
let hasMore = true;
let isLoading = false;
let loadError = "";
async function showArchives(room_id: number) {
// 重置分页状态
currentPage = 0;
archives = [];
hasMore = true;
isLoading = false;
loadError = "";
updateArchives();
archiveModal = true;
console.log(archives);
}
async function updateArchives() {
let updated_archives = (await invoke("get_archives", {
roomId: archiveRoom.room_id,
})) as RecordItem[];
updated_archives.sort((a, b) => {
return (
new Date(b.created_at).getTime() - new Date(a.created_at).getTime()
);
});
archives = updated_archives;
if (isLoading || !hasMore) return;
isLoading = true;
try {
let new_archives = (await invoke("get_archives", {
roomId: archiveRoom.room_id,
offset: currentPage * pageSize,
limit: pageSize,
})) as RecordItem[];
// 如果是第一页,直接替换;否则追加数据
if (currentPage === 0) {
archives = new_archives;
} else {
archives = [...archives, ...new_archives];
}
// 检查是否还有更多数据
hasMore = new_archives.length === pageSize;
// 按时间排序
archives.sort((a, b) => {
return (
new Date(b.created_at).getTime() - new Date(a.created_at).getTime()
);
});
// 更新总数(如果后端支持的话)
// totalCount = await invoke("get_archives_count", { roomId: archiveRoom.room_id });
currentPage++;
} catch (error) {
console.error("Failed to load archives:", error);
loadError = "加载失败,请重试";
} finally {
isLoading = false;
}
}
async function loadMoreArchives() {
await updateArchives();
}
function handleScroll(event: Event) {
const target = event.target as HTMLElement;
const { scrollTop, scrollHeight, clientHeight } = target;
// 当滚动到距离底部100px时自动加载更多
if (
scrollHeight - scrollTop - clientHeight < 100 &&
hasMore &&
!isLoading
) {
loadMoreArchives();
}
}
function format_ts(ts_string: string) {
@@ -643,7 +704,10 @@
</button>
</div>
<div class="flex-1 overflow-auto custom-scrollbar-light">
<div
class="flex-1 overflow-auto custom-scrollbar-light"
on:scroll={handleScroll}
>
<div class="p-6">
<div class="overflow-x-auto custom-scrollbar-light">
<table class="w-full">
@@ -740,6 +804,11 @@
liveId: archive.live_id,
})
.then(async () => {
// 删除后重新加载第一页数据
currentPage = 0;
archives = [];
hasMore = true;
loadError = "";
await updateArchives();
})
.catch((e) => {
@@ -756,6 +825,48 @@
</tbody>
</table>
</div>
<!-- 加载更多区域 -->
<div class="mt-6 flex justify-center">
{#if loadError}
<div class="text-red-500 dark:text-red-400 text-sm mb-3">
{loadError}
<button
class="ml-2 text-blue-500 hover:text-blue-600 underline"
on:click={() => {
loadError = "";
loadMoreArchives();
}}
>
重试
</button>
</div>
{/if}
{#if isLoading && currentPage === 0}
<div
class="flex items-center space-x-2 text-gray-500 dark:text-gray-400"
>
<div
class="animate-spin rounded-full h-5 w-5 border-b-2 border-blue-500"
></div>
<span>加载中...</span>
</div>
{:else if isLoading}
<div
class="flex items-center space-x-2 text-gray-500 dark:text-gray-400"
>
<div
class="animate-spin rounded-full h-4 w-4 border-b-2 border-blue-500"
></div>
<span>加载更多...</span>
</div>
{:else if archives.length === 0 && !isLoading}
<div class="text-gray-500 dark:text-gray-400 text-sm">
暂无录制记录
</div>
{/if}
</div>
</div>
</div>
</div>

View File

@@ -77,10 +77,14 @@
async function handleOutputChange() {
const new_folder = await browse_folder();
if (new_folder) {
setting_model.output = new_folder;
await invoke("set_output_path", {
outputPath: setting_model.output,
});
try {
await invoke("set_output_path", {
outputPath: new_folder,
});
setting_model.output = new_folder;
} catch (e) {
alert(e);
}
}
}
@@ -92,10 +96,14 @@
showModal = false;
const new_folder = await browse_folder();
if (new_folder) {
setting_model.cache = new_folder;
await invoke("set_cache_path", {
cachePath: setting_model.cache,
});
try {
await invoke("set_cache_path", {
cachePath: new_folder,
});
setting_model.cache = new_folder;
} catch (e) {
alert(e);
}
}
}

View File

@@ -45,11 +45,8 @@
summary = (await invoke("get_recorder_list")) as RecorderList;
total = summary.count;
online = summary.recorders.filter((r) => r.live_status).length;
let new_disk_usage = 0;
for (const recorder of summary.recorders) {
new_disk_usage += await get_disk_usage(recorder.room_id);
}
disk_usage = new_disk_usage;
disk_usage = await get_archive_disk_usage();
// get disk info
disk_info = await invoke("get_disk_info");
@@ -129,15 +126,9 @@
update_summary();
setInterval(update_summary, INTERVAL);
async function get_disk_usage(room_id: number) {
let ds = 0;
const archives = (await invoke("get_archives", {
roomId: room_id,
})) as RecordItem[];
for (const archive of archives) {
ds += archive.size;
}
return ds;
async function get_archive_disk_usage() {
const total_size = (await invoke("get_archive_disk_usage")) as number;
return total_size;
}
async function get_total_length(): Promise<number> {