first commit

This commit is contained in:
wsq
2026-05-13 21:58:19 +08:00
commit 0167c66cb7
1475 changed files with 233414 additions and 0 deletions
@@ -0,0 +1,60 @@
import { describe, expect, it } from 'vitest'
import {
isGlobalAnalyzeTaskRunning,
resolveGlobalAnalyzeCompletion,
} from '@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/assets/hooks/useAssetsGlobalActions'
describe('assets global actions task state helpers', () => {
  it('treats queued and processing analyze task as running', () => {
    // Both the pre-start (queued) and in-flight (processing) phases count as running.
    const queuedState = { phase: 'queued', runningTaskId: 'task-1', lastError: null }
    const processingState = { phase: 'processing', runningTaskId: 'task-1', lastError: null }
    expect(isGlobalAnalyzeTaskRunning(queuedState)).toBe(true)
    expect(isGlobalAnalyzeTaskRunning(processingState)).toBe(true)
  })
  it('keeps completion idle when there is no previously running task', () => {
    // No tracked task id -> completion state stays idle even on a completed phase.
    const completedState = { phase: 'completed', runningTaskId: null, lastError: null }
    const outcome = resolveGlobalAnalyzeCompletion(null, completedState)
    expect(outcome).toEqual({
      status: 'idle',
      finishedTaskId: null,
      errorMessage: null,
    })
  })
  it('marks previously running task as succeeded once runtime state stops running', () => {
    const completedState = { phase: 'completed', runningTaskId: null, lastError: null }
    const outcome = resolveGlobalAnalyzeCompletion('task-2', completedState)
    expect(outcome).toEqual({
      status: 'succeeded',
      finishedTaskId: 'task-2',
      errorMessage: null,
    })
  })
  it('surfaces failed completion message from task state', () => {
    // The lastError message from the task state becomes the user-facing error text.
    const failedState = {
      phase: 'failed',
      runningTaskId: null,
      lastError: {
        code: 'MODEL_NOT_CONFIGURED',
        message: 'No model configured',
      },
    }
    const outcome = resolveGlobalAnalyzeCompletion('task-3', failedState)
    expect(outcome).toEqual({
      status: 'failed',
      finishedTaskId: 'task-3',
      errorMessage: 'No model configured',
    })
  })
})
@@ -0,0 +1,76 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Hoisted handles so the vi.mock factories below can reference them
// (vitest hoists vi.mock calls above the imports in this file).
const {
  useQueryClientMock,
  useMutationMock,
  requestJsonWithErrorMock,
} = vi.hoisted(() => ({
  useQueryClientMock: vi.fn(() => ({ invalidateQueries: vi.fn() })),
  // Returns the options object unchanged so tests can call mutationFn directly.
  useMutationMock: vi.fn((options: unknown) => options),
  requestJsonWithErrorMock: vi.fn(),
}))
vi.mock('@tanstack/react-query', () => ({
  useQueryClient: () => useQueryClientMock(),
  useMutation: (options: unknown) => useMutationMock(options),
}))
// Keep the real module surface but stub out the network call and cache invalidation.
vi.mock('@/lib/query/mutations/mutation-shared', async () => {
  const actual = await vi.importActual<typeof import('@/lib/query/mutations/mutation-shared')>(
    '@/lib/query/mutations/mutation-shared',
  )
  return {
    ...actual,
    invalidateQueryTemplates: vi.fn(),
    requestJsonWithError: requestJsonWithErrorMock,
  }
})
// Imported after the mocks above so the hook resolves the stubbed dependencies.
import { useUpdateProjectCharacterVoiceSettings } from '@/lib/query/mutations/character-voice-mutations'
// Narrow view of the mutation object returned by the (mocked) useMutation.
interface UpdateVoiceMutation {
  mutationFn: (variables: {
    characterId: string
    voiceType: 'qwen-designed' | 'uploaded' | 'custom' | null
    voiceId?: string
    customVoiceUrl?: string
  }) => Promise<unknown>
}
describe('project character voice mutations', () => {
  beforeEach(() => {
    useQueryClientMock.mockClear()
    useMutationMock.mockClear()
    // Reset call history and restore a successful default response per test.
    requestJsonWithErrorMock.mockReset()
    requestJsonWithErrorMock.mockResolvedValue({ success: true })
  })
  it('routes voice setting updates to the character-voice endpoint after designed voice save', async () => {
    // useMutation is mocked to return its options, so mutationFn is callable directly.
    const mutation = useUpdateProjectCharacterVoiceSettings('project-1') as unknown as UpdateVoiceMutation
    await mutation.mutationFn({
      characterId: 'character-1',
      voiceType: 'qwen-designed',
      voiceId: 'voice-1',
      customVoiceUrl: 'https://example.com/audio.wav',
    })
    expect(requestJsonWithErrorMock).toHaveBeenCalledTimes(1)
    // Single PATCH to the unified assets endpoint with a project-scoped payload.
    expect(requestJsonWithErrorMock).toHaveBeenCalledWith(
      '/api/assets/character-1',
      {
        method: 'PATCH',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          scope: 'project',
          kind: 'character',
          projectId: 'project-1',
          voiceType: 'qwen-designed',
          voiceId: 'voice-1',
          customVoiceUrl: 'https://example.com/audio.wav',
        }),
      },
      '更新音色失败',
    )
  })
})
@@ -0,0 +1,52 @@
import { describe, expect, it } from 'vitest'
import {
buildVideoSubmissionKey,
createVideoSubmissionBaseline,
shouldResolveVideoSubmissionLock,
} from '@/lib/novel-promotion/stages/video-stage-runtime/immediate-video-submission'
describe('immediate video submission lock', () => {
  it('regenerating an existing video -> keeps local lock until task state or output changes', () => {
    const basePanel = {
      panelId: 'panel-1',
      storyboardId: 'storyboard-1',
      panelIndex: 0,
      videoUrl: 'https://example.com/original.mp4',
      videoErrorMessage: null,
      videoTaskRunning: false,
    }
    const baseline = createVideoSubmissionBaseline(basePanel)
    const oneSecondLater = baseline.startedAt + 1_000

    // The lock key is derived from the panel id.
    expect(buildVideoSubmissionKey(basePanel)).toBe('panel-1')

    // Nothing observable changed yet -> the lock must be held.
    const unchangedPanel = { ...basePanel, videoTaskRunning: false }
    expect(shouldResolveVideoSubmissionLock(unchangedPanel, baseline, oneSecondLater)).toBe(false)

    // A server-side task became visible -> the local lock can resolve.
    const taskVisiblePanel = { ...basePanel, videoTaskRunning: true }
    expect(shouldResolveVideoSubmissionLock(taskVisiblePanel, baseline, oneSecondLater)).toBe(true)

    // The output URL changed -> regeneration produced a new video -> lock resolves.
    const regeneratedPanel = { ...basePanel, videoUrl: 'https://example.com/regenerated.mp4' }
    expect(shouldResolveVideoSubmissionLock(regeneratedPanel, baseline, oneSecondLater)).toBe(true)
  })
})
@@ -0,0 +1,33 @@
import { describe, expect, it } from 'vitest'
import { buildInsertPanelLocationsDescription } from '@/lib/novel-promotion/insert-panel-prompt-context'
describe('insert panel prompt context', () => {
  it('injects available slots for related selected location images', () => {
    // Related location with a selected image that declares standing slots.
    const diningRoom = {
      name: '餐厅',
      images: [
        {
          isSelected: true,
          description: '长方形饭桌位于画面中央',
          availableSlots: JSON.stringify(['饭桌左侧靠桌边的位置']),
        },
      ],
    }
    // Unrelated location; must be excluded from the description.
    const livingRoom = {
      name: '客厅',
      images: [{ isSelected: true, description: '不会被选中' }],
    }
    const text = buildInsertPanelLocationsDescription([diningRoom, livingRoom], ['餐厅'])
    expect(text).toContain('餐厅: 长方形饭桌位于画面中央')
    expect(text).toContain('可站位置:')
    expect(text).toContain('饭桌左侧靠桌边的位置')
    expect(text).not.toContain('客厅')
  })
})
@@ -0,0 +1,26 @@
import { describe, expect, it } from 'vitest'
import { resolveInsertPanelUserInput } from '@/lib/novel-promotion/insert-panel'
describe('insert panel user input normalization', () => {
  it('uses localized default instruction when AI analyze sends empty input', () => {
    // Empty or whitespace-only input falls back to a per-locale default instruction.
    const zhDefault = resolveInsertPanelUserInput({ userInput: '' }, 'zh')
    expect(zhDefault).toBe('请根据前后镜头自动分析并插入一个自然衔接的新分镜。')
    const enDefault = resolveInsertPanelUserInput({ userInput: ' ' }, 'en')
    expect(enDefault).toBe(
      'Automatically analyze the surrounding panels and insert a naturally connected new panel.',
    )
  })
  it('prefers explicit user input over fallback prompt or default', () => {
    // Explicit input wins over the prompt and is trimmed.
    const payload = { userInput: ' 添加一个特写反应镜头 ', prompt: 'unused prompt' }
    expect(resolveInsertPanelUserInput(payload, 'zh')).toBe('添加一个特写反应镜头')
  })
  it('falls back to prompt when userInput is missing', () => {
    const payload = { prompt: ' Insert a pause beat between these panels. ' }
    expect(resolveInsertPanelUserInput(payload, 'en')).toBe('Insert a pause beat between these panels.')
  })
})
@@ -0,0 +1,64 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Hoisted handles so the vi.mock factories below can reference them.
const {
  useQueryClientMock,
  useMutationMock,
  requestJsonWithErrorMock,
} = vi.hoisted(() => ({
  useQueryClientMock: vi.fn(() => ({ invalidateQueries: vi.fn() })),
  // Returns the options object unchanged so tests can call mutationFn directly.
  useMutationMock: vi.fn((options: unknown) => options),
  requestJsonWithErrorMock: vi.fn(),
}))
vi.mock('@tanstack/react-query', () => ({
  useQueryClient: () => useQueryClientMock(),
  useMutation: (options: unknown) => useMutationMock(options),
}))
// Keep the real module surface but stub out the network call and cache invalidation.
vi.mock('@/lib/query/mutations/mutation-shared', async () => {
  const actual = await vi.importActual<typeof import('@/lib/query/mutations/mutation-shared')>(
    '@/lib/query/mutations/mutation-shared',
  )
  return {
    ...actual,
    invalidateQueryTemplates: vi.fn(),
    requestJsonWithError: requestJsonWithErrorMock,
  }
})
// Imported after the mocks above so the hook resolves the stubbed dependencies.
import { useConfirmProjectLocationSelection } from '@/lib/query/mutations/location-management-mutations'
// Narrow view of the mutation object returned by the (mocked) useMutation.
interface ConfirmLocationSelectionMutation {
  mutationFn: (variables: { locationId: string }) => Promise<unknown>
}
describe('project location-backed confirm mutations', () => {
  beforeEach(() => {
    useQueryClientMock.mockClear()
    useMutationMock.mockClear()
    // Reset call history and restore a successful default response per test.
    requestJsonWithErrorMock.mockReset()
    requestJsonWithErrorMock.mockResolvedValue({ success: true })
  })
  it('routes prop confirmation to the unified asset select-render endpoint', async () => {
    // useMutation is mocked to return its options, so mutationFn is callable directly.
    const mutation = useConfirmProjectLocationSelection('project-1', 'prop') as unknown as ConfirmLocationSelectionMutation
    await mutation.mutationFn({ locationId: 'prop-1' })
    expect(requestJsonWithErrorMock).toHaveBeenCalledTimes(1)
    // Single POST to the unified select-render route with a project-scoped payload.
    expect(requestJsonWithErrorMock).toHaveBeenCalledWith(
      '/api/assets/prop-1/select-render',
      {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          scope: 'project',
          kind: 'prop',
          projectId: 'project-1',
          confirm: true,
        }),
      },
      '确认选择失败',
    )
  })
})
@@ -0,0 +1,91 @@
import * as React from 'react'
import { createElement } from 'react'
import { describe, expect, it, vi } from 'vitest'
import { renderToStaticMarkup } from 'react-dom/server'
import NovelInputStage from '@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/NovelInputStage'
// Translator stub: echoes the key, or "key:name" when a name value is provided.
vi.mock('next-intl', () => ({
  useTranslations: () => (key: string, values?: Record<string, string | number>) => {
    if (values && 'name' in values) {
      return `${key}:${String(values.name)}`
    }
    return key
  },
}))
// Replace the shared composer with a probe that serializes the layout props
// into data-attributes and renders every slot child in order, so the markup
// assertions can inspect what NovelInputStage passed in.
vi.mock('@/components/story-input/StoryInputComposer', () => ({
  default: ({
    minRows,
    maxHeightViewportRatio,
    textareaClassName,
    topRight,
    footer,
    secondaryActions,
    primaryAction,
  }: {
    minRows: number
    maxHeightViewportRatio: number
    textareaClassName?: string
    topRight?: React.ReactNode
    footer?: React.ReactNode
    secondaryActions?: React.ReactNode
    primaryAction: React.ReactNode
  }) => createElement(
    'section',
    {
      'data-min-rows': String(minRows),
      'data-max-height-ratio': String(maxHeightViewportRatio),
      'data-textarea-class': textareaClassName,
    },
    topRight,
    footer,
    secondaryActions,
    primaryAction,
    'StoryInputComposer',
  ),
}))
// Inert stand-ins for nested components and side-effectful helpers.
vi.mock('@/components/task/TaskStatusInline', () => ({
  default: () => createElement('span', null, 'TaskStatusInline'),
}))
vi.mock('@/components/home/AiWriteModal', () => ({
  default: () => createElement('div', null, 'AiWriteModal'),
}))
vi.mock('@/lib/api-fetch', () => ({
  apiFetch: vi.fn(),
}))
vi.mock('@/lib/home/ai-story-expand', () => ({
  expandHomeStory: vi.fn(),
}))
// Icon stub keeps the icon name inspectable via a data attribute.
vi.mock('@/components/ui/icons', () => ({
  AppIcon: ({ name, ...props }: { name: string } & Record<string, unknown>) =>
    createElement('span', { ...props, 'data-icon': name }),
}))
describe('NovelInputStage', () => {
  it('uses the shared composer with a taller adaptive baseline in story mode', () => {
    // Expose React globally before rendering — presumably the compiled JSX
    // output references a global React factory; confirm against the build setup.
    Reflect.set(globalThis, 'React', React)
    const html = renderToStaticMarkup(
      createElement(NovelInputStage, {
        novelText: '',
        episodeName: '剧集 1',
        onNovelTextChange: () => undefined,
        onNext: () => undefined,
      }),
    )
    // The mocked StoryInputComposer serializes its props into data-attributes,
    // so these assertions pin the layout configuration NovelInputStage passes.
    expect(html).toContain('StoryInputComposer')
    expect(html).toContain('data-min-rows="8"')
    expect(html).toContain('data-max-height-ratio="0.5"')
    expect(html).toContain('data-textarea-class="px-0 pt-0 pb-3 align-top"')
    expect(html).toContain('aiWrite.trigger')
    expect(html).toContain('AiWriteModal')
    // Legacy word-count and config-summary UI must not render in story mode.
    expect(html).not.toContain('storyInput.wordCount 0')
    expect(html).not.toContain('storyInput.currentConfigSummary')
  })
})
@@ -0,0 +1,36 @@
import { describe, expect, it } from 'vitest'
import { usePanelTaskStatus } from '@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/video/panel-card/runtime/hooks/usePanelTaskStatus'
describe('panel task status error code mapping', () => {
  // Minimal hook arguments; only the error fields vary between cases.
  const buildArgs = (videoErrorCode: string, videoErrorMessage: string) => ({
    panel: {
      storyboardId: 'sb-1',
      panelIndex: 0,
      videoErrorCode,
      videoErrorMessage,
    },
    hasVisibleBaseVideo: false,
    tCommon: (key: string) => key,
  })
  it('uses explicit error code for user-facing panel error display', () => {
    const result = usePanelTaskStatus(buildArgs('EXTERNAL_ERROR', 'raw upstream message'))
    expect(result.panelErrorDisplay?.code).toBe('EXTERNAL_ERROR')
    expect(result.panelErrorDisplay?.message).toBe('raw upstream message')
  })
  it('shows fixed unsupported-format message for VIDEO_API_FORMAT_UNSUPPORTED', () => {
    const result = usePanelTaskStatus(buildArgs(
      'VIDEO_API_FORMAT_UNSUPPORTED',
      'VIDEO_API_FORMAT_UNSUPPORTED: OPENAI_COMPAT_VIDEO_TEMPLATE_TASK_ID_NOT_FOUND',
    ))
    expect(result.panelErrorDisplay?.code).toBe('VIDEO_API_FORMAT_UNSUPPORTED')
    // The raw upstream detail is replaced by a fixed localized message.
    expect(result.panelErrorDisplay?.message).toBe('当前视频接口格式暂不支持。')
  })
})
@@ -0,0 +1,80 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Hoisted handles so the vi.mock factories below can reference them.
const {
  useQueryClientMock,
  useMutationMock,
  requestTaskResponseWithErrorMock,
} = vi.hoisted(() => ({
  useQueryClientMock: vi.fn(() => ({ invalidateQueries: vi.fn() })),
  // Returns the options object unchanged so tests can call mutationFn directly.
  useMutationMock: vi.fn((options: unknown) => options),
  requestTaskResponseWithErrorMock: vi.fn(),
}))
vi.mock('@tanstack/react-query', () => ({
  useQueryClient: () => useQueryClientMock(),
  useMutation: (options: unknown) => useMutationMock(options),
}))
// Keep the real module surface but intercept the task-submission request.
vi.mock('@/lib/query/mutations/mutation-shared', async () => {
  const actual = await vi.importActual<typeof import('@/lib/query/mutations/mutation-shared')>(
    '@/lib/query/mutations/mutation-shared',
  )
  return {
    ...actual,
    requestTaskResponseWithError: requestTaskResponseWithErrorMock,
  }
})
// Imported after the mocks above so the hook resolves the stubbed dependencies.
import { useAnalyzeProjectGlobalAssets } from '@/lib/query/mutations/useProjectConfigMutations'
// Narrow view of the mutation object returned by the (mocked) useMutation.
interface AnalyzeGlobalMutation {
  mutationFn: () => Promise<unknown>
}
describe('project global analyze mutation', () => {
  beforeEach(() => {
    useQueryClientMock.mockClear()
    useMutationMock.mockClear()
    requestTaskResponseWithErrorMock.mockReset()
  })
  it('returns async task submission instead of waiting for final task result', async () => {
    // Minimal Response stand-in — only json() is consumed by the mutation.
    requestTaskResponseWithErrorMock.mockResolvedValue({
      json: async () => ({
        async: true,
        taskId: 'task-global-1',
        status: 'queued',
        deduped: false,
      }),
    } as Response)
    // useMutation is mocked to return its options, so mutationFn is callable directly.
    const mutation = useAnalyzeProjectGlobalAssets('project-1') as unknown as AnalyzeGlobalMutation
    const result = await mutation.mutationFn() as { taskId: string; async: boolean }
    // Submits an async analyze task to the project-scoped route.
    expect(requestTaskResponseWithErrorMock).toHaveBeenCalledWith(
      '/api/novel-promotion/project-1/analyze-global',
      {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ async: true }),
      },
      'Failed to analyze global assets',
    )
    // The submission payload is surfaced to the caller unchanged.
    expect(result).toEqual({
      async: true,
      taskId: 'task-global-1',
      status: 'queued',
      deduped: false,
    })
  })
  it('fails explicitly when route does not return an async task submission payload', async () => {
    // A plain success body (no taskId/async flag) must be rejected, not accepted.
    requestTaskResponseWithErrorMock.mockResolvedValue({
      json: async () => ({ success: true }),
    } as Response)
    const mutation = useAnalyzeProjectGlobalAssets('project-1') as unknown as AnalyzeGlobalMutation
    await expect(mutation.mutationFn()).rejects.toThrow('Failed to submit global asset analysis task')
  })
})
@@ -0,0 +1,81 @@
import { describe, expect, it } from 'vitest'
import {
hasScriptArtifacts,
hasStoryboardArtifacts,
hasVideoArtifacts,
resolveEpisodeStageArtifacts,
} from '@/lib/novel-promotion/stage-readiness'
describe('stage readiness', () => {
  it('treats script as ready only when at least one clip has non-empty screenplay', () => {
    expect(hasScriptArtifacts([])).toBe(false)
    // An empty screenplay string is not a script artifact.
    expect(hasScriptArtifacts([
      { id: 'clip-1', summary: '', location: null, characters: null, props: null, content: 'a', screenplay: '' },
    ])).toBe(false)
    // Leading whitespace is tolerated; non-empty screenplay JSON counts.
    expect(hasScriptArtifacts([
      { id: 'clip-1', summary: '', location: null, characters: null, props: null, content: 'a', screenplay: ' {"scenes":[]}' },
    ])).toBe(true)
  })
  it('treats storyboard as ready only when at least one storyboard has panels', () => {
    expect(hasStoryboardArtifacts([])).toBe(false)
    expect(hasStoryboardArtifacts([{ panels: [] }])).toBe(false)
    expect(hasStoryboardArtifacts([{ panels: [{ id: 'panel-1' }] }])).toBe(true)
  })
  it('treats video as ready only when at least one panel has videoUrl', () => {
    // An empty videoUrl string does not count as a rendered video.
    expect(hasVideoArtifacts([{ panels: [{ id: 'panel-1', videoUrl: '' }] }])).toBe(false)
    expect(hasVideoArtifacts([{ panels: [{ id: 'panel-1', videoUrl: 'https://example.com/video.mp4' }] }])).toBe(true)
  })
  it('derives full episode stage artifacts from persisted outputs', () => {
    // Fully-populated episode: story text, one scripted clip, one storyboard
    // whose single panel has a rendered video, and one voice line — so every
    // readiness flag should come back true.
    const readiness = resolveEpisodeStageArtifacts({
      novelText: 'story',
      clips: [
        { id: 'clip-1', summary: '', location: null, characters: null, props: null, content: 'a', screenplay: '{"scenes":[]}' },
      ],
      storyboards: [
        {
          id: 'sb-1',
          episodeId: 'ep-1',
          clipId: 'clip-1',
          storyboardTextJson: null,
          panelCount: 1,
          storyboardImageUrl: null,
          panels: [{
            id: 'panel-1',
            storyboardId: 'sb-1',
            panelIndex: 0,
            panelNumber: 1,
            shotType: null,
            cameraMove: null,
            description: null,
            location: null,
            characters: null,
            props: null,
            srtSegment: null,
            srtStart: null,
            srtEnd: null,
            duration: null,
            imagePrompt: null,
            imageUrl: null,
            imageHistory: null,
            videoPrompt: null,
            videoUrl: 'https://example.com/video.mp4',
            photographyRules: null,
            actingNotes: null,
          }],
        },
      ],
      voiceLines: [{ id: 'voice-1' }],
    })
    expect(readiness).toEqual({
      hasStory: true,
      hasScript: true,
      hasStoryboard: true,
      hasVideo: true,
      hasVoice: true,
    })
  })
})
@@ -0,0 +1,112 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
// Hoisted handles shared by the vi.mock factories below.
const {
  useStateMock,
  logErrorMock,
  refreshAssetsMock,
  updateVoiceSettingsMutateAsyncMock,
  saveDesignedVoiceMutateAsyncMock,
  setVoiceDesignCharacterMock,
} = vi.hoisted(() => ({
  useStateMock: vi.fn(),
  logErrorMock: vi.fn(),
  refreshAssetsMock: vi.fn(),
  updateVoiceSettingsMutateAsyncMock: vi.fn(),
  saveDesignedVoiceMutateAsyncMock: vi.fn(),
  setVoiceDesignCharacterMock: vi.fn(),
}))
// Keep the real React module but take control of useState so the hook can
// execute outside a React renderer.
vi.mock('react', async () => {
  const actual = await vi.importActual<typeof import('react')>('react')
  return {
    ...actual,
    useState: useStateMock,
  }
})
// Deterministic translator: only the keys asserted on below get special text.
vi.mock('next-intl', () => ({
  useTranslations: () => (key: string, values?: Record<string, unknown>) => {
    if (key === 'tts.voiceDesignSaved') {
      return `voice saved:${String(values?.name ?? '')}`
    }
    if (key === 'tts.saveVoiceDesignFailed') {
      return `save failed:${String(values?.error ?? '')}`
    }
    if (key === 'common.unknownError') {
      return 'unknown error'
    }
    return key
  },
}))
vi.mock('@/lib/logging/core', () => ({
  logError: (...args: unknown[]) => logErrorMock(...args),
}))
// Stub the query hooks with a single known character and controllable mutations.
vi.mock('@/lib/query/hooks', () => ({
  useProjectAssets: () => ({
    data: {
      characters: [{
        id: 'character-1',
        name: 'Hero',
        customVoiceUrl: null,
      }],
    },
  }),
  useRefreshProjectAssets: () => refreshAssetsMock,
  useUpdateProjectCharacterVoiceSettings: () => ({
    mutateAsync: updateVoiceSettingsMutateAsyncMock,
  }),
  useSaveProjectDesignedVoice: () => ({
    mutateAsync: saveDesignedVoiceMutateAsyncMock,
  }),
}))
// Imported after the mocks above so the hook resolves the stubbed dependencies.
import { useTTSGeneration } from '@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/assets/hooks/useTTSGeneration'
describe('useTTSGeneration', () => {
  // Preserve the real alert so the per-test stub can be undone.
  const originalAlert = globalThis.alert
  beforeEach(() => {
    useStateMock.mockReset()
    logErrorMock.mockReset()
    refreshAssetsMock.mockReset()
    updateVoiceSettingsMutateAsyncMock.mockReset()
    saveDesignedVoiceMutateAsyncMock.mockReset()
    setVoiceDesignCharacterMock.mockReset()
    // Designed-voice save succeeds by default and returns the stored audio URL.
    saveDesignedVoiceMutateAsyncMock.mockResolvedValue({
      success: true,
      audioUrl: 'https://signed.example.com/audio.wav',
    })
    globalThis.alert = vi.fn()
    // The hook's useState yields the character currently open in the
    // voice-design modal plus its setter.
    useStateMock.mockReturnValue([
      {
        id: 'character-1',
        name: 'Hero',
        hasExistingVoice: false,
      },
      setVoiceDesignCharacterMock,
    ])
  })
  afterEach(() => {
    globalThis.alert = originalAlert
  })
  it('does not send a second voice update request after designed voice save succeeds', async () => {
    const hook = useTTSGeneration({ projectId: 'project-1' })
    await hook.handleVoiceDesignSave('voice-1', 'base64-audio')
    expect(saveDesignedVoiceMutateAsyncMock).toHaveBeenCalledTimes(1)
    expect(saveDesignedVoiceMutateAsyncMock).toHaveBeenCalledWith({
      characterId: 'character-1',
      voiceId: 'voice-1',
      audioBase64: 'base64-audio',
    })
    // Regression guard: saving a designed voice must not trigger a follow-up
    // voice-settings update request.
    expect(updateVoiceSettingsMutateAsyncMock).not.toHaveBeenCalled()
    expect(refreshAssetsMock).toHaveBeenCalledTimes(1)
    expect(globalThis.alert).toHaveBeenCalledWith('voice saved:Hero')
    // The modal closes by clearing the active voice-design character.
    expect(setVoiceDesignCharacterMock).toHaveBeenCalledWith(null)
  })
})
@@ -0,0 +1,67 @@
import { describe, expect, it } from 'vitest'
import {
filterNormalVideoModelOptions,
isFirstLastFrameOnlyModel,
supportsFirstLastFrame,
} from '@/lib/model-capabilities/video-model-options'
import type { VideoModelOption } from '@/lib/novel-promotion/stages/video-stage-runtime/types'
describe('video model options partition', () => {
  // Compact builder for the video capability payload used by the fixtures.
  const videoCapability = (generationModeOptions: string[], firstlastframe: boolean) => ({
    video: { generationModeOptions, firstlastframe },
  })
  const models: VideoModelOption[] = [
    {
      value: 'p::normal',
      label: 'normal',
      capabilities: videoCapability(['normal'], false),
    },
    {
      value: 'p::firstlast-only',
      label: 'firstlast-only',
      capabilities: videoCapability(['firstlastframe'], true),
    },
    {
      value: 'p::both',
      label: 'both',
      capabilities: videoCapability(['normal', 'firstlastframe'], true),
    },
    {
      // Custom model with no declared capability block at all.
      value: 'p::custom-no-capability',
      label: 'custom-no-capability',
    },
  ]
  const [normalOnly, firstLastOnly, both, noCapability] = models
  it('detects firstlastframe support and firstlastframe-only capability', () => {
    expect(supportsFirstLastFrame(normalOnly)).toBe(false)
    expect(supportsFirstLastFrame(firstLastOnly)).toBe(true)
    expect(supportsFirstLastFrame(both)).toBe(true)
    expect(supportsFirstLastFrame(noCapability)).toBe(false)
    // Only the model whose sole generation mode is firstlastframe is "only".
    expect(isFirstLastFrameOnlyModel(normalOnly)).toBe(false)
    expect(isFirstLastFrameOnlyModel(firstLastOnly)).toBe(true)
    expect(isFirstLastFrameOnlyModel(both)).toBe(false)
    expect(isFirstLastFrameOnlyModel(noCapability)).toBe(false)
  })
  it('filters out firstlastframe-only models from normal video model list', () => {
    const remainingValues = filterNormalVideoModelOptions(models).map((item) => item.value)
    expect(remainingValues).toEqual([
      'p::normal',
      'p::both',
      'p::custom-no-capability',
    ])
  })
})
@@ -0,0 +1,167 @@
import React from 'react'
import { renderToStaticMarkup } from 'react-dom/server'
import { describe, expect, it, vi } from 'vitest'
import VideoPanelCardBody from '@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/video/panel-card/VideoPanelCardBody'
import type { VideoPanelRuntime } from '@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/video/panel-card/hooks/useVideoPanelActions'
// Inert stand-ins for nested components so the card body renders in isolation.
vi.mock('@/components/task/TaskStatusInline', () => ({
  default: () => React.createElement('span', null, 'task-status'),
}))
vi.mock('@/components/ui/config-modals/ModelCapabilityDropdown', () => ({
  ModelCapabilityDropdown: () => React.createElement('div', null, 'model-dropdown'),
}))
// Icon stub renders the icon name as text so it remains asserting-friendly.
vi.mock('@/components/ui/icons', () => ({
  AppIcon: ({ name }: { name: string }) => React.createElement('span', null, name),
}))
// Builds a fully-populated VideoPanelRuntime fixture for rendering
// VideoPanelCardBody. Every callback is a no-op and every flag is inert;
// tests replace whole top-level slices via `overrides`.
function createRuntime(overrides: Partial<VideoPanelRuntime> = {}): VideoPanelRuntime {
  // Deterministic translator covering only the keys the card body renders.
  const translate = (key: string, values?: Record<string, unknown>) => {
    if (key === 'firstLastFrame.asLastFrameFor') {
      return `作为镜头 ${String(values?.number ?? '')} 的尾帧`
    }
    if (key === 'firstLastFrame.asFirstFrameFor') {
      return `作为镜头 ${String(values?.number ?? '')} 的首帧`
    }
    if (key === 'firstLastFrame.generate') return '生成首尾帧视频'
    if (key === 'firstLastFrame.generated') return '首尾帧视频已生成'
    if (key === 'promptModal.promptLabel') return '视频提示词'
    if (key === 'promptModal.placeholder') return '输入首尾帧视频提示词...'
    if (key === 'panelCard.clickToEditPrompt') return '点击编辑提示词...'
    if (key === 'panelCard.selectModel') return '选择模型'
    if (key === 'panelCard.generateVideo') return '生成视频'
    if (key === 'panelCard.unknownShotType') return '未知镜头'
    if (key === 'stage.hasSynced') return '已生成'
    if (key === 'promptModal.duration') return '秒'
    return key
  }
  const runtime = {
    t: translate,
    tCommon: (key: string) => key,
    // Middle panel (index 2) — the layout slice below links it to index 1
    // (prev) and index 3 (next), exercising both chain directions.
    panel: {
      storyboardId: 'sb-1',
      panelIndex: 2,
      panelId: 'panel-2',
      imageUrl: 'https://example.com/frame-2.jpg',
      videoUrl: null,
      videoGenerationMode: null,
      lipSyncVideoUrl: null,
      textPanel: {
        shot_type: '平视中景',
        description: '谢俞站在宴席中央',
        duration: 3,
      },
    },
    panelIndex: 2,
    panelKey: 'sb-1-2',
    media: {
      showLipSyncVideo: true,
      onToggleLipSyncVideo: () => undefined,
      onPreviewImage: () => undefined,
      baseVideoUrl: undefined,
      currentVideoUrl: undefined,
    },
    // No tasks in flight; the card renders its idle state.
    taskStatus: {
      isVideoTaskRunning: false,
      isLipSyncTaskRunning: false,
      taskRunningVideoLabel: '生成中',
      lipSyncInlineState: null,
    },
    videoModel: {
      selectedModel: 'veo-3.1',
      setSelectedModel: () => undefined,
      capabilityFields: [],
      generationOptions: {},
      setCapabilityValue: () => undefined,
      missingCapabilityFields: [],
      videoModelOptions: [],
    },
    player: {
      isPlaying: false,
    },
    promptEditor: {
      isEditing: false,
      editingPrompt: '',
      setEditingPrompt: () => undefined,
      handleStartEdit: () => undefined,
      handleSave: () => undefined,
      handleCancelEdit: () => undefined,
      isSavingPrompt: false,
      localPrompt: '人物从席间回身,接到下一镜头',
    },
    voiceManager: {
      hasMatchedAudio: false,
      hasMatchedVoiceLines: false,
      audioGenerateError: null,
      localVoiceLines: [],
      isVoiceLineTaskRunning: () => false,
      handlePlayVoiceLine: () => undefined,
      handleGenerateAudio: async () => undefined,
      playingVoiceLineId: null,
    },
    lipSync: {
      handleStartLipSync: () => undefined,
      executingLipSync: false,
    },
    // Linked first/last-frame chain: this panel is the last frame of the
    // previous shot and feeds the first frame of the next one.
    layout: {
      isLinked: true,
      isLastFrame: true,
      nextPanel: {
        storyboardId: 'sb-1',
        panelIndex: 3,
        imageUrl: 'https://example.com/frame-3.jpg',
      },
      prevPanel: {
        storyboardId: 'sb-1',
        panelIndex: 1,
        imageUrl: 'https://example.com/frame-1.jpg',
      },
      hasNext: true,
      flModel: 'veo-3.1',
      flModelOptions: [],
      flGenerationOptions: {},
      flCapabilityFields: [],
      flMissingCapabilityFields: [],
      flCustomPrompt: '',
      defaultFlPrompt: '',
      videoRatio: '9:16',
    },
    // All actions are no-ops; rendering only, no interaction in these tests.
    actions: {
      onGenerateVideo: () => undefined,
      onUpdatePanelVideoModel: () => undefined,
      onToggleLink: () => undefined,
      onFlModelChange: () => undefined,
      onFlCapabilityChange: () => undefined,
      onFlCustomPromptChange: () => undefined,
      onResetFlPrompt: () => undefined,
      onGenerateFirstLastFrame: () => undefined,
    },
    computed: {
      showLipSyncSection: false,
      canLipSync: false,
      hasVisibleBaseVideo: false,
    },
  }
  // Cast through unknown: the fixture is intentionally a structural subset
  // of VideoPanelRuntime sufficient for the rendered paths under test.
  return {
    ...runtime,
    ...overrides,
  } as unknown as VideoPanelRuntime
}
describe('VideoPanelCardBody', () => {
  it('renders incoming and outgoing first-last-frame UI for chained panel', () => {
    const markup = renderToStaticMarkup(
      React.createElement(VideoPanelCardBody, { runtime: createRuntime() }),
    )
    // The default fixture is a linked middle panel, so both the incoming
    // (tail-frame) and outgoing (head-frame) labels plus the prompt editor
    // and generate action must all be present in the static markup.
    const requiredFragments = [
      '作为镜头 2 的尾帧',
      '作为镜头 4 的首帧',
      '视频提示词',
      '生成首尾帧视频',
    ]
    for (const fragment of requiredFragments) {
      expect(markup).toContain(fragment)
    }
  })
})
@@ -0,0 +1,44 @@
import { describe, expect, it, vi } from 'vitest'
// Replace useMemo with an immediate factory call so the hook under test can
// run outside a React renderer; everything else in React stays real.
vi.mock('react', async () => {
  const actual = await vi.importActual<typeof import('react')>('react')
  return {
    ...actual,
    useMemo: <T,>(factory: () => T) => factory(),
  }
})
import { useVideoPanelsProjection } from '@/lib/novel-promotion/stages/video-stage-runtime/useVideoPanelsProjection'
describe('video panels projection error code', () => {
  it('projects failed task lastError code/message onto panel fields', () => {
    // Every panel's video task reports the same failed state in this fixture.
    const failedTaskState = {
      phase: 'failed',
      lastError: {
        code: 'EXTERNAL_ERROR',
        message: 'upstream failed',
      },
    }
    const projection = useVideoPanelsProjection({
      clips: [{ id: 'clip-1', start: 0, end: 5, summary: 'clip' }],
      storyboards: [{
        id: 'sb-1',
        clipId: 'clip-1',
        panels: [{
          id: 'panel-1',
          panelIndex: 0,
          description: 'panel',
        }],
      }],
      panelVideoStates: { getTaskState: () => failedTaskState },
      panelLipStates: { getTaskState: () => null },
    })
    expect(projection.allPanels).toHaveLength(1)
    const [projectedPanel] = projection.allPanels
    // Both the machine-readable code and the human-readable message carry over.
    expect(projectedPanel?.videoErrorCode).toBe('EXTERNAL_ERROR')
    expect(projectedPanel?.videoErrorMessage).toBe('upstream failed')
  })
})
@@ -0,0 +1,92 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Hoisted handles so the vi.mock factories below can reference them.
const {
  useStateMock,
  useCallbackMock,
  useQueryClientMock,
  upsertTaskTargetOverlayMock,
} = vi.hoisted(() => ({
  useStateMock: vi.fn(),
  // Passes callbacks through unchanged so the hook's handlers are callable.
  useCallbackMock: vi.fn((fn: unknown) => fn),
  useQueryClientMock: vi.fn(() => ({ id: 'query-client' })),
  upsertTaskTargetOverlayMock: vi.fn(),
}))
// Keep the real React module but take control of the hooks the unit uses.
vi.mock('react', async () => {
  const actual = await vi.importActual<typeof import('react')>('react')
  return {
    ...actual,
    useState: useStateMock,
    useCallback: useCallbackMock,
  }
})
vi.mock('@tanstack/react-query', () => ({
  useQueryClient: () => useQueryClientMock(),
}))
vi.mock('@/lib/query/task-target-overlay', () => ({
  upsertTaskTargetOverlay: (...args: unknown[]) => upsertTaskTargetOverlayMock(...args),
}))
import { useVoiceGenerationActions } from '@/lib/novel-promotion/stages/voice-stage-runtime/useVoiceGenerationActions'
describe('useVoiceGenerationActions', () => {
  beforeEach(() => {
    useStateMock.mockReset()
    useCallbackMock.mockClear()
    useQueryClientMock.mockClear()
    upsertTaskTargetOverlayMock.mockReset()
    // Three [false, setter] pairs for the hook's sequential useState calls —
    // NOTE(review): the count of three is assumed to match the hook; verify.
    useStateMock
      .mockImplementationOnce(() => [false, vi.fn()])
      .mockImplementationOnce(() => [false, vi.fn()])
      .mockImplementationOnce(() => [false, vi.fn()])
  })
  it('adds an optimistic task overlay for async single-line generation', async () => {
    const setPendingVoiceGenerationByLineId = vi.fn()
    const notifyVoiceLinesChanged = vi.fn()
    // Simulates the mutation acknowledging an async task submission.
    const generateVoiceMutation = {
      mutateAsync: vi.fn(async () => ({
        success: true,
        async: true,
        taskId: 'task-voice-1',
      })),
    }
    const runtime = useVoiceGenerationActions({
      projectId: 'project-1',
      episodeId: 'episode-1',
      t: (key: string) => key,
      voiceLines: [],
      linesWithAudio: 0,
      speakerCharacterMap: {},
      speakerVoices: {},
      analyzeVoiceMutation: { mutateAsync: vi.fn() },
      generateVoiceMutation,
      downloadVoicesMutation: { mutateAsync: vi.fn() },
      loadData: vi.fn(),
      notifyVoiceLinesChanged,
      setPendingVoiceGenerationByLineId,
    })
    await runtime.handleGenerateLine('line-1')
    // The overlay optimistically marks the line queued under the returned task id.
    expect(upsertTaskTargetOverlayMock).toHaveBeenCalledWith(
      { id: 'query-client' },
      {
        projectId: 'project-1',
        targetType: 'NovelPromotionVoiceLine',
        targetId: 'line-1',
        phase: 'queued',
        runningTaskId: 'task-voice-1',
        runningTaskType: 'voice_line',
        intent: 'generate',
        hasOutputAtStart: false,
      },
    )
    expect(notifyVoiceLinesChanged).toHaveBeenCalledTimes(1)
    // Pending state is written twice during submission — NOTE(review): the
    // exact call sites are assumed; verify against the hook implementation.
    expect(setPendingVoiceGenerationByLineId).toHaveBeenCalledTimes(2)
  })
})
@@ -0,0 +1,256 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
// Produces a VoiceLine fixture with narrator defaults; callers override only
// the fields a particular test case cares about.
function buildVoiceLine(overrides: Partial<VoiceLine>): VoiceLine {
  const defaults: VoiceLine = {
    id: 'line-1',
    lineIndex: 1,
    speaker: '旁白',
    content: '测试台词',
    emotionPrompt: null,
    emotionStrength: null,
    audioUrl: null,
    updatedAt: '2026-03-07T12:00:00.000Z',
    lineTaskRunning: false,
  }
  return { ...defaults, ...overrides }
}
// Tests for useVoiceRuntimeSync with React's useEffect/useRef mocked (spies
// declared earlier in this file). Effects are captured instead of executed,
// then invoked manually by positional index — those indices mirror the hook's
// internal useEffect declaration order, so renumber them if effects are added
// or reordered in the hook.
describe('useVoiceRuntimeSync', () => {
  beforeEach(() => {
    useEffectMock.mockReset()
    useRefMock.mockReset()
    apiFetchMock.mockReset()
    // Default useRef: hand back a fresh ref box per call. NOTE(review): this
    // means refs do NOT persist across the two hook invocations below —
    // presumably acceptable for what these tests assert.
    useRefMock.mockImplementation((initialValue: unknown) => ({
      current: initialValue,
    }))
  })
  it('keeps pending regeneration until the line updatedAt advances', () => {
    const loadData = vi.fn(async () => undefined)
    const setPendingVoiceGenerationByLineId = vi.fn()
    const effectCallbacks: Array<() => void | (() => void)> = []
    // Capture every registered effect so each "render" can be replayed by hand.
    useEffectMock.mockImplementation((callback: () => void | (() => void)) => {
      effectCallbacks.push(callback)
    })
    // Task already completed, but the line's updatedAt below still equals the
    // submitted timestamp — so the pending entry must not be cleared yet.
    const pendingGeneration = {
      'line-1': {
        submittedUpdatedAt: '2026-03-07T12:00:00.000Z',
        startedAt: '2026-03-07T11:59:59.000Z',
        taskId: 'task-1',
        taskStatus: 'completed' as const,
        taskErrorMessage: null,
      },
    }
    // First render: server row unchanged (same updatedAt as submitted).
    useVoiceRuntimeSync({
      loadData,
      voiceLines: [buildVoiceLine({
        audioUrl: '/m/voice-old.wav',
        updatedAt: '2026-03-07T12:00:00.000Z',
      })],
      activeVoiceTaskLineIds: new Set(),
      pendingVoiceGenerationByLineId: pendingGeneration,
      setPendingVoiceGenerationByLineId,
    })
    const firstRenderEffects = effectCallbacks.splice(0)
    // Index 2: the pending-settlement reconciliation effect (by hook order).
    firstRenderEffects[2]?.()
    const keepPendingUpdater = setPendingVoiceGenerationByLineId.mock.calls[0]?.[0] as
      | ((prev: typeof pendingGeneration) => typeof pendingGeneration)
      | undefined
    // Returning the identical reference signals "leave pending state as-is".
    expect(keepPendingUpdater?.(pendingGeneration)).toBe(pendingGeneration)
    // Second render: the server row now carries newer audio and updatedAt.
    useVoiceRuntimeSync({
      loadData,
      voiceLines: [buildVoiceLine({
        audioUrl: '/m/voice-new.wav',
        updatedAt: '2026-03-07T12:00:03.000Z',
      })],
      activeVoiceTaskLineIds: new Set(),
      pendingVoiceGenerationByLineId: pendingGeneration,
      setPendingVoiceGenerationByLineId,
    })
    const secondRenderEffects = effectCallbacks.splice(0)
    secondRenderEffects[2]?.()
    const settleUpdater = setPendingVoiceGenerationByLineId.mock.calls[1]?.[0] as
      | ((prev: typeof pendingGeneration) => Record<string, never>)
      | undefined
    // Newer updatedAt observed -> the pending entry is settled (cleared).
    expect(settleUpdater?.(pendingGeneration)).toEqual({})
  })
  it('polls task status for pending generations with task ids', async () => {
    const loadData = vi.fn(async () => undefined)
    const setPendingVoiceGenerationByLineId = vi.fn()
    const effectCallbacks: Array<() => void | (() => void)> = []
    // Stub window timers so the polling interval and its cleanup are observable.
    const windowStub = {
      setInterval: vi.fn(() => 123 as unknown as number),
      clearInterval: vi.fn(),
    }
    vi.stubGlobal('window', windowStub)
    // Backend reports the task still processing — no terminal state yet.
    apiFetchMock.mockResolvedValue({
      ok: true,
      json: async () => ({
        task: {
          status: 'processing',
          errorMessage: null,
        },
      }),
    })
    useEffectMock.mockImplementation((callback: () => void | (() => void)) => {
      effectCallbacks.push(callback)
    })
    useVoiceRuntimeSync({
      loadData,
      voiceLines: [buildVoiceLine({
        audioUrl: '/m/voice-old.wav',
        updatedAt: '2026-03-07T12:00:00.000Z',
      })],
      activeVoiceTaskLineIds: new Set(),
      pendingVoiceGenerationByLineId: {
        'line-1': {
          submittedUpdatedAt: '2026-03-07T12:00:00.000Z',
          startedAt: '2026-03-07T12:24:10.000Z',
          taskId: 'task-1',
          taskStatus: 'queued',
          taskErrorMessage: null,
        },
      },
      setPendingVoiceGenerationByLineId,
    })
    const renderEffects = effectCallbacks.splice(0)
    // Index 3: the task-polling effect; keep its cleanup for the teardown check.
    const cleanup = renderEffects[3]?.()
    // Flush microtasks so the immediate initial poll settles.
    await Promise.resolve()
    expect(apiFetchMock).toHaveBeenCalledWith('/api/tasks/task-1', {
      method: 'GET',
      cache: 'no-store',
    })
    // Polling repeats on a 1.2s interval...
    expect(windowStub.setInterval).toHaveBeenCalledWith(expect.any(Function), 1200)
    // ...and the effect cleanup cancels exactly the interval it created.
    cleanup?.()
    expect(windowStub.clearInterval).toHaveBeenCalledWith(123)
    vi.unstubAllGlobals()
  })
  it('notifies task failure with backend error message', () => {
    const loadData = vi.fn(async () => undefined)
    const setPendingVoiceGenerationByLineId = vi.fn()
    const onTaskFailure = vi.fn()
    const effectCallbacks: Array<() => void | (() => void)> = []
    useEffectMock.mockImplementation((callback: () => void | (() => void)) => {
      effectCallbacks.push(callback)
    })
    useVoiceRuntimeSync({
      loadData,
      voiceLines: [buildVoiceLine({
        id: 'line-9',
        lineIndex: 9,
      })],
      activeVoiceTaskLineIds: new Set(),
      // Pending entry already marked failed by the poller, with the backend's
      // error message attached.
      pendingVoiceGenerationByLineId: {
        'line-9': {
          submittedUpdatedAt: '2026-03-07T12:00:00.000Z',
          startedAt: '2026-03-07T12:24:10.000Z',
          taskId: 'task-failed-1',
          taskStatus: 'failed',
          taskErrorMessage: 'QwenTTS voiceId missing',
        },
      },
      setPendingVoiceGenerationByLineId,
      onTaskFailure,
    })
    const renderEffects = effectCallbacks.splice(0)
    // Index 1: the failure-notification effect (by hook order).
    renderEffects[1]?.()
    // The callback receives the line id, the matching line object, and the
    // backend-provided task id and error message verbatim.
    expect(onTaskFailure).toHaveBeenCalledWith({
      lineId: 'line-9',
      line: expect.objectContaining({
        id: 'line-9',
        lineIndex: 9,
      }),
      taskId: 'task-failed-1',
      errorMessage: 'QwenTTS voiceId missing',
    })
  })
  it('treats canceled task as terminal failure for pending voice generation', () => {
    const loadData = vi.fn(async () => undefined)
    const setPendingVoiceGenerationByLineId = vi.fn()
    const onTaskFailure = vi.fn()
    const effectCallbacks: Array<() => void | (() => void)> = []
    useEffectMock.mockImplementation((callback: () => void | (() => void)) => {
      effectCallbacks.push(callback)
    })
    useVoiceRuntimeSync({
      loadData,
      voiceLines: [buildVoiceLine({
        id: 'line-10',
        lineIndex: 10,
      })],
      activeVoiceTaskLineIds: new Set(),
      // 'canceled' must be handled like 'failed': terminal, with notification.
      pendingVoiceGenerationByLineId: {
        'line-10': {
          submittedUpdatedAt: '2026-03-07T12:00:00.000Z',
          startedAt: '2026-03-07T12:24:10.000Z',
          taskId: 'task-canceled-1',
          taskStatus: 'canceled',
          taskErrorMessage: 'Task cancelled by user',
        },
      },
      setPendingVoiceGenerationByLineId,
      onTaskFailure,
    })
    const renderEffects = effectCallbacks.splice(0)
    renderEffects[1]?.()
    expect(onTaskFailure).toHaveBeenCalledWith({
      lineId: 'line-10',
      line: expect.objectContaining({
        id: 'line-10',
        lineIndex: 10,
      }),
      taskId: 'task-canceled-1',
      errorMessage: 'Task cancelled by user',
    })
  })
})
@@ -0,0 +1,88 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Spies are created via vi.hoisted so the vi.mock factories below — which
// vitest hoists above the imports — can safely close over them.
const {
  useStateMock,
  useRefMock,
  useCallbackMock,
  useEffectMock,
  mutateAsyncMock,
} = vi.hoisted(() => ({
  useStateMock: vi.fn(),
  // useRef default: a plain ref box, mirroring React's shape.
  useRefMock: vi.fn((value: unknown) => ({ current: value })),
  // useCallback default: return the function unchanged (no memoization needed).
  useCallbackMock: vi.fn((fn: unknown) => fn),
  useEffectMock: vi.fn(),
  mutateAsyncMock: vi.fn(),
}))
// Replace React's hooks so the hook under test can run as a plain function
// outside any component/renderer.
vi.mock('react', async () => {
  const actual = await vi.importActual<typeof import('react')>('react')
  return {
    ...actual,
    useState: useStateMock,
    useRef: useRefMock,
    useCallback: useCallbackMock,
    useEffect: useEffectMock,
  }
})
// Stub the voice-stage fetch mutation so tests control the loaded payloads.
vi.mock('@/lib/query/hooks', () => ({
  useFetchProjectVoiceStageData: () => ({
    mutateAsync: mutateAsyncMock,
  }),
}))
import { useVoiceStageDataLoader } from '@/lib/novel-promotion/stages/voice-stage-runtime/useVoiceStageDataLoader'
// Tests for useVoiceStageDataLoader with React hooks replaced by the spies
// declared above. useState is scripted per call, so the order of the
// mockImplementationOnce chain must match the hook's useState call order.
describe('useVoiceStageDataLoader', () => {
  beforeEach(() => {
    mutateAsyncMock.mockReset()
    useStateMock.mockReset()
    useRefMock.mockClear()
    useCallbackMock.mockClear()
    useEffectMock.mockClear()
  })

  it('keeps background reloads from re-entering blocking loading state', async () => {
    const setVoiceLines = vi.fn()
    const setSpeakerVoices = vi.fn()
    const setProjectSpeakers = vi.fn()
    const setLoading = vi.fn()
    // Script the hook's four useState slots: lines, voices, speakers, loading.
    useStateMock
      .mockImplementationOnce(() => [[], setVoiceLines])
      .mockImplementationOnce(() => [{}, setSpeakerVoices])
      .mockImplementationOnce(() => [[], setProjectSpeakers])
      .mockImplementationOnce(() => [true, setLoading])
    // Two server payloads: the reload only differs by the assigned voice id.
    const stagePayload = (voiceId: string) => ({
      voiceLines: [{ id: 'line-1' }],
      speakerVoices: { Narrator: { voiceType: 'uploaded', voiceId } },
      speakers: ['Narrator'],
    })
    mutateAsyncMock
      .mockResolvedValueOnce(stagePayload('voice-1'))
      .mockResolvedValueOnce(stagePayload('voice-2'))

    const hook = useVoiceStageDataLoader({
      projectId: 'project-1',
      episodeId: 'episode-1',
    })

    // First load is blocking; the second is a background refresh.
    await hook.loadData()
    await hook.loadData()

    // Only the initial load may flip loading on; both loads flip it off.
    const loadingValues = setLoading.mock.calls.map(([value]) => value)
    expect(loadingValues.filter((value) => value === true)).toHaveLength(1)
    expect(loadingValues.filter((value) => value === false)).toHaveLength(2)
    // Both loads push data through and hit the API with the episode id.
    for (const nth of [1, 2]) {
      expect(setVoiceLines).toHaveBeenNthCalledWith(nth, [{ id: 'line-1' }])
      expect(mutateAsyncMock).toHaveBeenNthCalledWith(nth, { episodeId: 'episode-1' })
    }
  })
})
@@ -0,0 +1,81 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Spies hoisted so the vi.mock('react') factory below (hoisted above imports
// by vitest) can reference them.
const { useEffectMock, useRefMock } = vi.hoisted(() => ({
  useEffectMock: vi.fn(),
  useRefMock: vi.fn(),
}))
// Swap only useEffect/useRef for spies; everything else stays the real React,
// letting the hook under test run as a plain function.
vi.mock('react', async () => {
  const actual = await vi.importActual<typeof import('react')>('react')
  return {
    ...actual,
    useEffect: useEffectMock,
    useRef: useRefMock,
  }
})
import { useWorkspaceAutoRun } from '@/app/[locale]/workspace/[projectId]/modes/novel-promotion/hooks/useWorkspaceAutoRun'
// Tests for useWorkspaceAutoRun. Effects are captured (not executed) by the
// mocked useEffect and fired manually after the hook runs.
describe('useWorkspaceAutoRun', () => {
  beforeEach(() => {
    useEffectMock.mockReset()
    useRefMock.mockReset()
    // Default useRef: a fresh ref box per call.
    useRefMock.mockImplementation((initialValue: unknown) => ({
      current: initialValue,
    }))
  })

  it('consumes autoRun=storyToScript and starts the story-to-script flow once', async () => {
    const capturedEffects: Array<() => void | (() => void)> = []
    useEffectMock.mockImplementation((effect: () => void | (() => void)) => {
      capturedEffects.push(effect)
    })
    const routerStub = { replace: vi.fn() }
    const runWithRebuildConfirm = vi.fn(async () => undefined)
    const runStoryToScriptFlow = vi.fn(async () => undefined)

    useWorkspaceAutoRun({
      searchParams: new URLSearchParams('episode=episode-1&autoRun=storyToScript'),
      router: routerStub,
      episodeId: 'episode-1',
      novelText: '第一章内容',
      isTransitioning: false,
      isStoryToScriptRunning: false,
      runWithRebuildConfirm,
      runStoryToScriptFlow,
    })
    // Fire the auto-run effect registered on first render.
    capturedEffects[0]?.()

    // The autoRun param is stripped from the URL, then the flow starts once.
    expect(routerStub.replace).toHaveBeenCalledWith('?episode=episode-1', { scroll: false })
    expect(runWithRebuildConfirm).toHaveBeenCalledWith('storyToScript', runStoryToScriptFlow)
  })

  it('does not auto-run when the episode text is still empty', () => {
    const capturedEffects: Array<() => void | (() => void)> = []
    useEffectMock.mockImplementation((effect: () => void | (() => void)) => {
      capturedEffects.push(effect)
    })
    const routerStub = { replace: vi.fn() }
    const runWithRebuildConfirm = vi.fn(async () => undefined)
    const runStoryToScriptFlow = vi.fn(async () => undefined)

    useWorkspaceAutoRun({
      searchParams: new URLSearchParams('episode=episode-1&autoRun=storyToScript'),
      router: routerStub,
      episodeId: 'episode-1',
      // Whitespace-only text counts as empty: the auto-run must not trigger.
      novelText: '   ',
      isTransitioning: false,
      isStoryToScriptRunning: false,
      runWithRebuildConfirm,
      runStoryToScriptFlow,
    })
    capturedEffects[0]?.()

    expect(routerStub.replace).not.toHaveBeenCalled()
    expect(runWithRebuildConfirm).not.toHaveBeenCalled()
  })
})
@@ -0,0 +1,71 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
// Mutation spies hoisted so the vi.mock factories below — hoisted above the
// imports by vitest — can close over them.
const {
  generateVideoMutateAsyncMock,
  batchGenerateVideosMutateAsyncMock,
  updateProjectPanelVideoPromptMutateAsyncMock,
  updateProjectClipMutateAsyncMock,
  updateProjectConfigMutateAsyncMock,
} = vi.hoisted(() => ({
  generateVideoMutateAsyncMock: vi.fn(),
  batchGenerateVideosMutateAsyncMock: vi.fn(),
  updateProjectPanelVideoPromptMutateAsyncMock: vi.fn(),
  updateProjectClipMutateAsyncMock: vi.fn(),
  updateProjectConfigMutateAsyncMock: vi.fn(),
}))
// Stub the storyboard video-generation mutations used by the hook under test.
vi.mock('@/lib/query/hooks/useStoryboards', () => ({
  useGenerateVideo: () => ({
    mutateAsync: generateVideoMutateAsyncMock,
  }),
  useBatchGenerateVideos: () => ({
    mutateAsync: batchGenerateVideosMutateAsyncMock,
  }),
}))
// Stub the project-update mutations the hook wires its actions to.
vi.mock('@/lib/query/hooks', () => ({
  useUpdateProjectPanelVideoPrompt: () => ({
    mutateAsync: updateProjectPanelVideoPromptMutateAsyncMock,
  }),
  useUpdateProjectClip: () => ({
    mutateAsync: updateProjectClipMutateAsyncMock,
  }),
  useUpdateProjectConfig: () => ({
    mutateAsync: updateProjectConfigMutateAsyncMock,
  }),
}))
import { useWorkspaceVideoActions } from '@/app/[locale]/workspace/[projectId]/modes/novel-promotion/hooks/useWorkspaceVideoActions'
describe('useWorkspaceVideoActions', () => {
const originalAlert = globalThis.alert
beforeEach(() => {
generateVideoMutateAsyncMock.mockReset()
batchGenerateVideosMutateAsyncMock.mockReset()
updateProjectPanelVideoPromptMutateAsyncMock.mockReset()
updateProjectClipMutateAsyncMock.mockReset()
updateProjectConfigMutateAsyncMock.mockReset()
globalThis.alert = vi.fn()
})
afterEach(() => {
globalThis.alert = originalAlert
})
it('single video mutation fails -> rethrows error for immediate lock cleanup', async () => {
generateVideoMutateAsyncMock.mockRejectedValueOnce(new Error('video submit failed'))
const actions = useWorkspaceVideoActions({
projectId: 'project-1',
episodeId: 'episode-1',
t: (key: string) => key,
})
await expect(
actions.handleGenerateVideo('storyboard-1', 0, 'veo-3.1'),
).rejects.toThrow('video submit failed')
expect(globalThis.alert).toHaveBeenCalledWith('execution.generationFailed: video submit failed')
})
})