first commit

This commit is contained in:
wsq
2026-05-13 21:58:19 +08:00
commit 0167c66cb7
1475 changed files with 233414 additions and 0 deletions
+34
View File
@@ -0,0 +1,34 @@
import { describe, expect, it } from 'vitest'
import { toAiRuntimeError } from '@/lib/ai-runtime/errors'
describe('toAiRuntimeError empty response mapping', () => {
it('maps nested Gemini empty response signal to EMPTY_RESPONSE even when status is 429', () => {
const upstreamError = new Error('Too Many Requests') as Error & {
status?: number
cause?: unknown
}
upstreamError.status = 429
upstreamError.cause = {
error: {
message: 'received empty response from Gemini: no meaningful content in candidates (request id: x)',
type: 'channel_error',
code: 'channel:empty_response',
},
code: 429,
status: 'Too Many Requests',
}
const runtimeError = toAiRuntimeError(upstreamError)
expect(runtimeError.code).toBe('EMPTY_RESPONSE')
expect(runtimeError.retryable).toBe(true)
})
it('keeps RATE_LIMIT when there is no empty response signal', () => {
const runtimeError = toAiRuntimeError({
status: 429,
message: 'Too Many Requests',
})
expect(runtimeError.code).toBe('RATE_LIMIT')
expect(runtimeError.retryable).toBe(true)
})
})
@@ -0,0 +1,71 @@
import type { UIMessage } from 'ai'
import { describe, expect, it } from 'vitest'
import { extractMessageContent } from '@/components/assistant/AssistantChatModal'
// Wraps the given parts in a minimal assistant-role UIMessage for the parser.
const createAssistantMessage = (parts: Array<Record<string, unknown>>): UIMessage =>
  ({
    id: 'assistant-message',
    role: 'assistant',
    parts,
  } as unknown as UIMessage)

describe('assistant chat modal message content parser', () => {
  it('keeps reasoning parts out of normal visible lines', () => {
    const content = extractMessageContent(createAssistantMessage([
      { type: 'reasoning', text: '先分析接口字段映射' },
      { type: 'text', text: '我需要你的 status 返回样例。' },
    ]))
    expect(content.lines).toEqual(['我需要你的 status 返回样例。'])
    expect(content.reasoningLines).toEqual(['先分析接口字段映射'])
  })

  it('extracts think tags from text into reasoning section', () => {
    const content = extractMessageContent(createAssistantMessage([
      {
        type: 'text',
        text: '<think>先确认 create/status/content 三个端点</think>请补充 status 返回 JSON',
      },
    ]))
    expect(content.lines).toEqual(['请补充 status 返回 JSON'])
    expect(content.reasoningLines).toEqual(['先确认 create/status/content 三个端点'])
  })

  it('extracts reasoning from unclosed think tag during streaming', () => {
    // Streaming can leave a <think> tag open; its text still counts as reasoning.
    const content = extractMessageContent(createAssistantMessage([
      {
        type: 'text',
        text: '<think>先确认任务状态枚举和输出路径',
      },
    ]))
    expect(content.lines).toEqual([])
    expect(content.reasoningLines).toEqual(['先确认任务状态枚举和输出路径'])
  })

  it('preserves tool output and issues as visible lines', () => {
    const content = extractMessageContent(createAssistantMessage([
      {
        type: 'tool-saveModelTemplate',
        state: 'output-available',
        output: {
          message: '模型已保存',
          issues: [{ field: 'response.statusPath', message: 'missing' }],
        },
      },
    ]))
    expect(content.lines).toEqual(['模型已保存', 'response.statusPath: missing'])
    expect(content.reasoningLines).toEqual([])
  })
})
@@ -0,0 +1,22 @@
import { describe, expect, it } from 'vitest'
import { PRESET_MODELS, PRESET_PROVIDERS } from '@/app/[locale]/profile/components/api-config/types'
describe('api-config minimax preset', () => {
  it('uses official minimax baseUrl in preset provider', () => {
    const preset = PRESET_PROVIDERS.find(({ id }) => id === 'minimax')
    expect(preset).toBeDefined()
    expect(preset?.baseUrl).toBe('https://api.minimaxi.com/v1')
  })

  it('includes all required minimax official llm preset models', () => {
    const presetLlmIds = PRESET_MODELS
      .filter((entry) => entry.provider === 'minimax' && entry.type === 'llm')
      .map((entry) => entry.modelId)
    // Every official MiniMax LLM id must be present in the preset catalog.
    for (const requiredId of [
      'MiniMax-M2.5',
      'MiniMax-M2.5-highspeed',
      'MiniMax-M2.1',
      'MiniMax-M2.1-highspeed',
      'MiniMax-M2',
    ]) {
      expect(presetLlmIds).toContain(requiredId)
    }
  })
})
@@ -0,0 +1,62 @@
import { describe, expect, it } from 'vitest'
import {
PRESET_MODELS,
encodeModelKey,
isPresetComingSoonModel,
isPresetComingSoonModelKey,
} from '@/app/[locale]/profile/components/api-config/types'
describe('api-config preset coming soon', () => {
  // Asserts an ark model is live (not "coming soon") through both lookup APIs.
  const expectArkModelLive = (modelId: string): void => {
    expect(isPresetComingSoonModel('ark', modelId)).toBe(false)
    expect(isPresetComingSoonModelKey(encodeModelKey('ark', modelId))).toBe(false)
  }

  it('registers Nano Banana 2 under Google AI Studio presets', () => {
    const entry = PRESET_MODELS.find(
      (model) => model.provider === 'google' && model.modelId === 'gemini-3.1-flash-image-preview',
    )
    expect(entry).toBeDefined()
    expect(entry?.name).toBe('Nano Banana 2')
  })

  it('registers Seedance 2.0 and Seedance 2.0 Fast as preset video models', () => {
    const arkVideoIds = PRESET_MODELS
      .filter((model) => model.provider === 'ark' && model.type === 'video')
      .map((model) => model.modelId)
    expect(arkVideoIds).toEqual(expect.arrayContaining([
      'doubao-seedance-2-0-260128',
      'doubao-seedance-2-0-fast-260128',
    ]))
  })

  it('does not mark live preset models as coming soon', () => {
    expectArkModelLive('doubao-seedance-2-0-260128')
  })

  it('does not mark normal preset models as coming soon', () => {
    expectArkModelLive('doubao-seedance-2-0-fast-260128')
  })

  it('keeps existing live preset models non-coming-soon', () => {
    expectArkModelLive('doubao-seedance-1-5-pro-251215')
  })

  it('registers Bailian Wan i2v preset models', () => {
    const bailianVideoIds = PRESET_MODELS
      .filter((model) => model.provider === 'bailian' && model.type === 'video')
      .map((model) => model.modelId)
    expect(bailianVideoIds).toEqual(expect.arrayContaining([
      'wan2.7-i2v',
      'wan2.6-i2v-flash',
      'wan2.6-i2v',
      'wan2.5-i2v-preview',
      'wan2.2-i2v-plus',
      'wan2.2-kf2v-flash',
      'wanx2.1-kf2v-plus',
    ]))
  })
})
@@ -0,0 +1,49 @@
import { describe, expect, it } from 'vitest'
import { getAssistantSavedModelLabel } from '@/app/[locale]/profile/components/api-config/provider-card/hooks/useProviderCardState'
describe('provider card assistant saved label', () => {
  it('prefers draft model name when available', () => {
    // Full draft (including a compat media template): the display name should win.
    const resolvedLabel = getAssistantSavedModelLabel({
      savedModelKey: 'openai-compatible:oa-1::veo_3_1-fast-4K',
      draftModel: {
        modelId: 'veo_3_1-fast-4K',
        name: 'Veo 3.1 Fast 4K',
        type: 'video',
        provider: 'openai-compatible:oa-1',
        compatMediaTemplate: {
          version: 1,
          mediaType: 'video',
          mode: 'async',
          create: { method: 'POST', path: '/v1/video/create' },
          status: { method: 'GET', path: '/v1/video/query?id={{task_id}}' },
          response: { taskIdPath: '$.id', statusPath: '$.status' },
          polling: {
            intervalMs: 5000,
            timeoutMs: 600000,
            doneStates: ['completed'],
            failStates: ['failed'],
          },
        },
      },
    })
    expect(resolvedLabel).toBe('Veo 3.1 Fast 4K')
  })

  it('falls back to model id parsed from savedModelKey', () => {
    // No draft: the label is derived from the key's "::<modelId>" suffix.
    expect(getAssistantSavedModelLabel({
      savedModelKey: 'openai-compatible:oa-1::veo_3_1-fast-4K',
    })).toBe('veo_3_1-fast-4K')
  })
})
@@ -0,0 +1,173 @@
import { describe, expect, it } from 'vitest'
import {
getAddableModelTypesForProvider,
getVisibleModelTypesForProvider,
shouldShowOpenAICompatVideoHint,
} from '@/app/[locale]/profile/components/api-config/provider-card/ProviderAdvancedFields'
import {
buildCustomPricingFromModelForm,
buildProviderConnectionPayload,
} from '@/app/[locale]/profile/components/api-config/provider-card/hooks/useProviderCardState'
// Unit coverage for the provider-card pricing-form helpers (model-type tabs,
// custom-pricing payload building) and the connection-test payload builder.
describe('provider card pricing form behavior', () => {
  it('allows openai-compatible provider to add llm/image/video', () => {
    expect(getAddableModelTypesForProvider('openai-compatible:oa-1')).toEqual(['llm', 'image', 'video'])
  })
  it('shows llm/image/video tabs by default for openai-compatible even with only image models', () => {
    // Only an image model is configured, yet all three tabs must be visible.
    const visible = getVisibleModelTypesForProvider(
      'openai-compatible:oa-1',
      {
        image: [
          {
            modelId: 'gpt-image-1',
            modelKey: 'openai-compatible:oa-1::gpt-image-1',
            name: 'Image',
            type: 'image',
            provider: 'openai-compatible:oa-1',
            price: 0,
            enabled: true,
          },
        ],
      },
    )
    expect(visible).toEqual(['llm', 'image', 'video'])
  })
  it('shows the openai-compatible video hint only for openai-compatible video add forms', () => {
    // Hint is specific to openai-compatible providers AND the video model type.
    expect(shouldShowOpenAICompatVideoHint('openai-compatible:oa-1', 'video')).toBe(true)
    expect(shouldShowOpenAICompatVideoHint('openai-compatible:oa-1', 'image')).toBe(false)
    expect(shouldShowOpenAICompatVideoHint('gemini-compatible:gm-1', 'video')).toBe(false)
    expect(shouldShowOpenAICompatVideoHint('ark', 'video')).toBe(false)
  })
  it('keeps payload without customPricing when pricing toggle is off', () => {
    // basePrice is filled but the toggle is off, so no customPricing is emitted.
    const result = buildCustomPricingFromModelForm(
      'image',
      {
        name: 'Image',
        modelId: 'gpt-image-1',
        enableCustomPricing: false,
        basePrice: '0.8',
      },
      { needsCustomPricing: true },
    )
    expect(result).toEqual({ ok: true })
  })
  it('builds llm customPricing payload when pricing toggle is on', () => {
    // String form inputs are parsed into per-million numeric rates.
    const result = buildCustomPricingFromModelForm(
      'llm',
      {
        name: 'GPT',
        modelId: 'gpt-4.1',
        enableCustomPricing: true,
        priceInput: '2.5',
        priceOutput: '8',
      },
      { needsCustomPricing: true },
    )
    expect(result).toEqual({
      ok: true,
      customPricing: {
        llm: {
          inputPerMillion: 2.5,
          outputPerMillion: 8,
        },
      },
    })
  })
  it('builds media customPricing payload with option prices when enabled', () => {
    // optionPricesJson is user-entered JSON mapping option dimensions to surcharges.
    const result = buildCustomPricingFromModelForm(
      'video',
      {
        name: 'Sora',
        modelId: 'sora-2',
        enableCustomPricing: true,
        basePrice: '0.9',
        optionPricesJson: '{"resolution":{"720x1280":0.1},"duration":{"8":0.4}}',
      },
      { needsCustomPricing: true },
    )
    expect(result).toEqual({
      ok: true,
      customPricing: {
        video: {
          basePrice: 0.9,
          optionPrices: {
            resolution: {
              '720x1280': 0.1,
            },
            duration: {
              '8': 0.4,
            },
          },
        },
      },
    })
  })
  it('rejects invalid media optionPrices JSON when enabled', () => {
    // Non-numeric price values ("free") must be rejected, not coerced.
    const result = buildCustomPricingFromModelForm(
      'image',
      {
        name: 'Image',
        modelId: 'gpt-image-1',
        enableCustomPricing: true,
        basePrice: '0.3',
        optionPricesJson: '{"resolution":{"1024x1024":"free"}}',
      },
      { needsCustomPricing: true },
    )
    expect(result).toEqual({ ok: false, reason: 'invalid' })
  })
  it('bugfix: includes baseUrl for openai-compatible provider connection test payload', () => {
    // Inputs are trimmed, and baseUrl must survive into the payload.
    const payload = buildProviderConnectionPayload({
      providerKey: 'openai-compatible',
      apiKey: ' sk-test ',
      baseUrl: ' https://api.openai-proxy.example/v1 ',
    })
    expect(payload).toEqual({
      apiType: 'openai-compatible',
      apiKey: 'sk-test',
      baseUrl: 'https://api.openai-proxy.example/v1',
    })
  })
  it('omits baseUrl for non-compatible provider connection test payload', () => {
    // Preset providers (e.g. ark) ignore any supplied baseUrl.
    const payload = buildProviderConnectionPayload({
      providerKey: 'ark',
      apiKey: ' ark-key ',
      baseUrl: ' https://ignored.example/v1 ',
    })
    expect(payload).toEqual({
      apiType: 'ark',
      apiKey: 'ark-key',
    })
  })
  it('includes llmModel in provider connection test payload when configured', () => {
    const payload = buildProviderConnectionPayload({
      providerKey: 'openai-compatible',
      apiKey: ' sk-test ',
      baseUrl: ' https://compat.example.com/v1 ',
      llmModel: ' gpt-4.1-mini ',
    })
    expect(payload).toEqual({
      apiType: 'openai-compatible',
      apiKey: 'sk-test',
      baseUrl: 'https://compat.example.com/v1',
      llmModel: 'gpt-4.1-mini',
    })
  })
})
@@ -0,0 +1,83 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type { CustomModel } from '@/app/[locale]/profile/components/api-config/types'
import {
probeModelLlmProtocolViaApi,
shouldProbeModelLlmProtocol,
shouldReprobeModelLlmProtocol,
} from '@/app/[locale]/profile/components/api-config/provider-card/hooks/useProviderCardState'
// Unit coverage for the provider-card LLM protocol probe helpers: which models
// get probed, when a re-probe is needed, and how the probe API response parses.
describe('api-config provider-card protocol probe helpers', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // Fix: the fetch stubs installed via vi.stubGlobal below were never
    // removed, leaking a mocked fetch into whatever test runs next. Restore
    // all stubbed globals before each test so every case starts clean.
    vi.unstubAllGlobals()
  })
  it('only probes openai-compatible llm models', () => {
    // Probing applies only to the openai-compatible provider + llm type combo.
    expect(shouldProbeModelLlmProtocol({ providerId: 'openai-compatible:oa-1', modelType: 'llm' })).toBe(true)
    expect(shouldProbeModelLlmProtocol({ providerId: 'openai-compatible:oa-1', modelType: 'image' })).toBe(false)
    expect(shouldProbeModelLlmProtocol({ providerId: 'gemini-compatible:gm-1', modelType: 'llm' })).toBe(false)
  })
  it('re-probes only when modelId/provider changed on openai-compatible llm', () => {
    // Baseline: a model whose protocol has already been probed and recorded.
    const originalModel: CustomModel = {
      modelId: 'gpt-4.1-mini',
      modelKey: 'openai-compatible:oa-1::gpt-4.1-mini',
      name: 'GPT 4.1 Mini',
      type: 'llm',
      provider: 'openai-compatible:oa-1',
      llmProtocol: 'chat-completions',
      llmProtocolCheckedAt: '2026-01-01T00:00:00.000Z',
      price: 0,
      enabled: true,
    }
    // Same model id: no re-probe needed.
    expect(shouldReprobeModelLlmProtocol({
      providerId: 'openai-compatible:oa-1',
      originalModel,
      nextModelId: 'gpt-4.1-mini',
    })).toBe(false)
    // Changed model id on an openai-compatible provider: re-probe.
    expect(shouldReprobeModelLlmProtocol({
      providerId: 'openai-compatible:oa-1',
      originalModel,
      nextModelId: 'gpt-4.1',
    })).toBe(true)
    // Non-openai-compatible providers never re-probe.
    expect(shouldReprobeModelLlmProtocol({
      providerId: 'gemini-compatible:gm-1',
      originalModel,
      nextModelId: 'gpt-4.1',
    })).toBe(false)
  })
  it('parses successful probe response payload', async () => {
    // Stub fetch to return a successful probe body; the helper should map
    // protocol/checkedAt into the llmProtocol* fields.
    const fetchMock = vi.fn(async () => new Response(JSON.stringify({
      success: true,
      protocol: 'responses',
      checkedAt: '2026-03-05T10:00:00.000Z',
    }), { status: 200 }))
    vi.stubGlobal('fetch', fetchMock)
    const result = await probeModelLlmProtocolViaApi({
      providerId: 'openai-compatible:oa-1',
      modelId: 'gpt-4.1-mini',
    })
    expect(result).toEqual({
      llmProtocol: 'responses',
      llmProtocolCheckedAt: '2026-03-05T10:00:00.000Z',
    })
  })
  it('throws probe failure code on unsuccessful probe response', async () => {
    // An HTTP-200 body with success:false must still reject, surfacing the code.
    const fetchMock = vi.fn(async () => new Response(JSON.stringify({
      success: false,
      code: 'PROBE_INCONCLUSIVE',
    }), { status: 200 }))
    vi.stubGlobal('fetch', fetchMock)
    await expect(probeModelLlmProtocolViaApi({
      providerId: 'openai-compatible:oa-1',
      modelId: 'gpt-4.1-mini',
    })).rejects.toThrow('PROBE_INCONCLUSIVE')
  })
})
@@ -0,0 +1,25 @@
import { describe, expect, it } from 'vitest'
import { getCompatibilityLayerBadgeLabel } from '@/app/[locale]/profile/components/api-config/provider-card/ProviderCardShell'
describe('provider card shell compatibility layer badge', () => {
  // Minimal translator stub: resolves the two badge keys, echoes anything else.
  const translations: Record<string, string> = {
    compatibilityLayerOpenAI: 'OpenAI 兼容层',
    compatibilityLayerGemini: 'Gemini 兼容层',
  }
  const t = (key: string): string => translations[key] ?? key

  it('shows OpenAI compatible layer label for openai-compatible providers', () => {
    expect(getCompatibilityLayerBadgeLabel('openai-compatible:oa-1', t)).toBe('OpenAI 兼容层')
  })

  it('shows Gemini compatible layer label for gemini-compatible providers', () => {
    expect(getCompatibilityLayerBadgeLabel('gemini-compatible:gm-1', t)).toBe('Gemini 兼容层')
  })

  it('does not show compatibility label for preset providers', () => {
    for (const presetId of ['google', 'ark', 'bailian', 'siliconflow']) {
      expect(getCompatibilityLayerBadgeLabel(presetId, t)).toBeNull()
    }
  })
})
@@ -0,0 +1,182 @@
import * as React from 'react'
import { createElement } from 'react'
import { renderToStaticMarkup } from 'react-dom/server'
import { afterEach, describe, expect, it, vi } from 'vitest'
import type { UseProviderCardStateResult } from '@/app/[locale]/profile/components/api-config/provider-card/hooks/useProviderCardState'
import { ProviderCardShell } from '@/app/[locale]/profile/components/api-config/provider-card/ProviderCardShell'
import type { ProviderTutorial } from '@/app/[locale]/profile/components/api-config/types'
// Shared portal-mock state. vi.hoisted runs before the hoisted vi.mock factory
// below, so the mock can reference these values at module-mock time.
const portalMocks = vi.hoisted(() => {
  return {
    // Set by each test to the element createPortal is expected to target.
    currentPortalTarget: null as unknown,
    createPortalMock: vi.fn((node: React.ReactNode, target: unknown) => {
      // Tag the rendered wrapper so assertions can check where the portal went.
      const targetLabel = target === portalMocks.currentPortalTarget ? 'body' : 'unknown'
      return createElement('div', { 'data-portal-target': targetLabel }, node)
    }),
  }
})
// Replace react-dom's createPortal with the spy; everything else stays real.
vi.mock('react-dom', async () => {
  const actual = await vi.importActual<typeof import('react-dom')>('react-dom')
  return {
    ...actual,
    createPortal: portalMocks.createPortalMock,
  }
})
// Builds a fully-populated UseProviderCardStateResult stub with no-op handlers,
// varying only the tutorial, so ProviderCardShell can render in isolation.
function createState(tutorial: ProviderTutorial): UseProviderCardStateResult {
  return {
    providerKey: 'ark',
    isPresetProvider: true,
    showBaseUrlEdit: false,
    tutorial,
    groupedModels: {},
    hasModels: false,
    isEditing: false,
    isEditingUrl: false,
    showKey: false,
    tempKey: '',
    tempUrl: '',
    // Forces the tutorial modal open so the test can assert on its portal.
    showTutorial: true,
    showAddForm: null,
    newModel: {
      name: '',
      modelId: '',
      enableCustomPricing: false,
      priceInput: '',
      priceOutput: '',
      basePrice: '',
      optionPricesJson: '',
    },
    batchMode: false,
    editingModelId: null,
    editModel: {
      name: '',
      modelId: '',
      enableCustomPricing: false,
      priceInput: '',
      priceOutput: '',
      basePrice: '',
      optionPricesJson: '',
    },
    maskedKey: '',
    isPresetModel: () => false,
    isDefaultModel: () => false,
    setShowKey: () => undefined,
    setShowTutorial: () => undefined,
    setShowAddForm: () => undefined,
    setBatchMode: () => undefined,
    setNewModel: () => undefined,
    setEditModel: () => undefined,
    setTempKey: () => undefined,
    setTempUrl: () => undefined,
    startEditKey: () => undefined,
    startEditUrl: () => undefined,
    handleSaveKey: () => Promise.resolve(),
    handleCancelEdit: () => undefined,
    handleSaveUrl: () => undefined,
    handleCancelUrlEdit: () => undefined,
    handleEditModel: () => undefined,
    handleCancelEditModel: () => undefined,
    handleSaveModel: () => Promise.resolve(),
    handleAddModel: () => Promise.resolve(),
    handleCancelAdd: () => undefined,
    needsCustomPricing: false,
    keyTestStatus: 'idle',
    keyTestSteps: [],
    handleForceSaveKey: () => undefined,
    handleTestOnly: () => undefined,
    handleDismissTest: () => undefined,
    isModelSavePending: false,
    assistantEnabled: false,
    isAssistantOpen: false,
    assistantSavedEvent: null,
    assistantChat: {
      messages: [],
      input: '',
      status: 'ready',
      pending: false,
      error: undefined,
      setInput: () => undefined,
      send: async () => undefined,
      clear: () => undefined,
    },
    openAssistant: () => undefined,
    closeAssistant: () => undefined,
    handleAssistantSend: () => Promise.resolve(),
  }
}
// Renders ProviderCardShell with a fixed placeholder body as its children.
function ProviderCardShellWithBody(
  props: Omit<React.ComponentProps<typeof ProviderCardShell>, 'children'>,
): React.ReactElement {
  type ShellProps = Omit<React.ComponentProps<typeof ProviderCardShell>, 'children'>
  // Cast the shell to a children-accepting component type so a body node can
  // be injected through createElement.
  const Shell = ProviderCardShell as unknown as React.ComponentType<
    React.PropsWithChildren<ShellProps>
  >
  const body = createElement('div', null, 'provider-body')
  return createElement(Shell, props, body)
}
describe('ProviderCardShell tutorial modal', () => {
  afterEach(() => {
    vi.clearAllMocks()
    portalMocks.currentPortalTarget = null
    // Remove the globals installed by the test so later suites see a clean env.
    Reflect.deleteProperty(globalThis, 'React')
    Reflect.deleteProperty(globalThis, 'document')
  })
  it('mounts the tutorial modal through a portal to document.body', () => {
    // Minimal stand-in for document; only the identity of `body` matters,
    // since the portal mock compares the target by reference.
    const fakeDocument = {
      body: { nodeName: 'BODY' },
    }
    Reflect.set(globalThis, 'React', React)
    portalMocks.currentPortalTarget = fakeDocument.body
    Reflect.set(globalThis, 'document', fakeDocument)
    const tutorial: ProviderTutorial = {
      providerId: 'ark',
      steps: [
        {
          text: 'ark_step1',
          url: 'https://example.com/ark-key',
        },
      ],
    }
    const state = createState(tutorial)
    // Translator stub covering every tutorial key the shell is expected to render.
    const t = (key: string): string => {
      if (key === 'tutorial.button') return '开通教程'
      if (key === 'tutorial.title') return '开通教程'
      if (key === 'tutorial.subtitle') return '按照以下步骤完成配置'
      if (key === 'tutorial.steps.ark_step1') return '进入控制台创建 API Key'
      if (key === 'tutorial.openLink') return '点击打开'
      if (key === 'tutorial.close') return '关闭'
      return key
    }
    const html = renderToStaticMarkup(
      createElement(
        ProviderCardShellWithBody,
        {
          provider: {
            id: 'ark',
            name: '阿里云百炼',
            hasApiKey: true,
          },
          onDeleteProvider: () => undefined,
          t,
          state,
        },
      ),
    )
    // The portal must be used exactly once, targeting the faked document.body,
    // and the rendered markup must carry the step text and link.
    expect(portalMocks.createPortalMock).toHaveBeenCalledTimes(1)
    expect(portalMocks.createPortalMock.mock.calls[0]?.[1]).toBe(fakeDocument.body)
    expect(html).toContain('data-portal-target="body"')
    expect(html).toContain('进入控制台创建 API Key')
    expect(html).toContain('href="https://example.com/ark-key"')
  })
})
@@ -0,0 +1,119 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// vi.mock is hoisted above the imports below, so the hook under test can be
// called as a plain function outside a React render: useMemo is replaced with
// an immediate invocation of its factory.
vi.mock('react', async () => {
  const actual = await vi.importActual<typeof import('react')>('react')
  return {
    ...actual,
    useMemo: <T,>(factory: () => T) => factory(),
  }
})
import { useApiConfigFilters } from '@/app/[locale]/profile/components/api-config-tab/hooks/useApiConfigFilters'
import type { CustomModel, Provider } from '@/app/[locale]/profile/components/api-config/types'
describe('api config filters', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })
  it('merges audio providers into modelProviders and removes audioProviders output', () => {
    const providers: Provider[] = [
      { id: 'fal', name: 'FAL', hasApiKey: true, apiKey: 'k-fal' },
      { id: 'bailian', name: 'Alibaba Bailian', hasApiKey: true, apiKey: 'k-bl' },
    ]
    const models: CustomModel[] = [
      {
        modelId: 'fal-ai/index-tts-2/text-to-speech',
        modelKey: 'fal::fal-ai/index-tts-2/text-to-speech',
        name: 'IndexTTS 2',
        type: 'audio',
        provider: 'fal',
        price: 0,
        enabled: true,
      },
      {
        modelId: 'qwen3-tts-vd-2026-01-26',
        modelKey: 'bailian::qwen3-tts-vd-2026-01-26',
        name: 'Qwen3 TTS',
        type: 'audio',
        provider: 'bailian',
        price: 0,
        enabled: true,
      },
      {
        modelId: 'qwen-voice-design',
        modelKey: 'bailian::qwen-voice-design',
        name: 'Qwen Voice Design',
        type: 'audio',
        provider: 'bailian',
        price: 0,
        enabled: true,
      },
      {
        modelId: 'qwen3.5-flash',
        modelKey: 'bailian::qwen3.5-flash',
        name: 'Qwen 3.5 Flash',
        type: 'llm',
        provider: 'bailian',
        price: 0,
        enabled: true,
      },
    ]
    // Direct call works because the react mock above makes useMemo immediate.
    const result = useApiConfigFilters({ providers, models })
    const providerIds = result.modelProviders.map((provider) => provider.id)
    const audioDefaultIds = result.getEnabledModelsByType('audio').map((model) => model.modelId)
    expect(providerIds).toEqual(['fal', 'bailian'])
    expect(audioDefaultIds).toEqual(expect.arrayContaining([
      'fal-ai/index-tts-2/text-to-speech',
      'qwen3-tts-vd-2026-01-26',
    ]))
    // 'qwen-voice-design' must be excluded from the enabled audio defaults.
    expect(audioDefaultIds).not.toContain('qwen-voice-design')
    // The legacy audioProviders field must be absent from the result shape.
    expect(Object.prototype.hasOwnProperty.call(result, 'audioProviders')).toBe(false)
  })
  it('keeps modelProviders order aligned with providers input order', () => {
    const providers: Provider[] = [
      { id: 'google', name: 'Google AI Studio', hasApiKey: true, apiKey: 'k-google' },
      { id: 'openai-compatible:oa-2', name: 'OpenAI B', hasApiKey: true, apiKey: 'k-oa2' },
      { id: 'ark', name: 'Volcengine Ark', hasApiKey: true, apiKey: 'k-ark' },
    ]
    const models: CustomModel[] = [
      {
        modelId: 'gemini-3.1-pro-preview',
        modelKey: 'google::gemini-3.1-pro-preview',
        name: 'Gemini 3.1 Pro',
        type: 'llm',
        provider: 'google',
        price: 0,
        enabled: true,
      },
      {
        modelId: 'gpt-4.1',
        modelKey: 'openai-compatible:oa-2::gpt-4.1',
        name: 'GPT 4.1',
        type: 'llm',
        provider: 'openai-compatible:oa-2',
        price: 0,
        enabled: true,
      },
      {
        modelId: 'doubao-seed-2-0-pro-260215',
        modelKey: 'ark::doubao-seed-2-0-pro-260215',
        name: 'Doubao Seed 2.0 Pro',
        type: 'llm',
        provider: 'ark',
        price: 0,
        enabled: true,
      },
    ]
    const result = useApiConfigFilters({ providers, models })
    // Output ordering mirrors the providers array, not model declaration order.
    expect(result.modelProviders.map((provider) => provider.id)).toEqual([
      'google',
      'openai-compatible:oa-2',
      'ark',
    ])
  })
})
@@ -0,0 +1,100 @@
import type { UIMessage } from 'ai'
import { describe, expect, it } from 'vitest'
import { collectSavedEvents } from '@/components/assistant/useAssistantChat'
// Unit coverage for collectSavedEvents: extracting "model saved" events from
// assistant tool-output message parts (single and batch save tools).
describe('assistant chat saved events parser', () => {
  it('parses single save tool output event', () => {
    // One message carrying a completed tool-saveModelTemplate output.
    const messages = [{
      id: 'm1',
      role: 'assistant',
      parts: [{
        type: 'tool-saveModelTemplate',
        state: 'output-available',
        output: {
          status: 'saved',
          savedModelKey: 'openai-compatible:oa-1::veo3-fast',
          draftModel: {
            modelId: 'veo3-fast',
            name: 'Veo 3 Fast',
            type: 'video',
            provider: 'openai-compatible:oa-1',
            compatMediaTemplate: {
              version: 1,
              mediaType: 'video',
              mode: 'async',
              create: { method: 'POST', path: '/video/create' },
              status: { method: 'GET', path: '/video/query?id={{task_id}}' },
              response: { taskIdPath: '$.id', statusPath: '$.status' },
              polling: { intervalMs: 5000, timeoutMs: 600000, doneStates: ['completed'], failStates: ['failed'] },
            },
          },
        },
      }],
    }] as unknown as UIMessage[]
    const events = collectSavedEvents(messages)
    expect(events).toHaveLength(1)
    expect(events[0]?.savedModelKey).toBe('openai-compatible:oa-1::veo3-fast')
    expect(events[0]?.draftModel?.modelId).toBe('veo3-fast')
  })
  it('parses batch save tool output events', () => {
    // A batch tool-saveModelTemplates output fans out into one event per model,
    // preserving the order of savedModelKeys/draftModels.
    const messages = [{
      id: 'm2',
      role: 'assistant',
      parts: [{
        type: 'tool-saveModelTemplates',
        state: 'output-available',
        output: {
          status: 'saved',
          savedModelKeys: [
            'openai-compatible:oa-1::veo3-fast',
            'openai-compatible:oa-1::veo3.1-fast',
          ],
          draftModels: [
            {
              modelId: 'veo3-fast',
              name: 'Veo 3 Fast',
              type: 'video',
              provider: 'openai-compatible:oa-1',
              compatMediaTemplate: {
                version: 1,
                mediaType: 'video',
                mode: 'async',
                create: { method: 'POST', path: '/video/create' },
                status: { method: 'GET', path: '/video/query?id={{task_id}}' },
                response: { taskIdPath: '$.id', statusPath: '$.status' },
                polling: { intervalMs: 5000, timeoutMs: 600000, doneStates: ['completed'], failStates: ['failed'] },
              },
            },
            {
              modelId: 'veo3.1-fast',
              name: 'Veo 3.1 Fast',
              type: 'video',
              provider: 'openai-compatible:oa-1',
              compatMediaTemplate: {
                version: 1,
                mediaType: 'video',
                mode: 'async',
                create: { method: 'POST', path: '/video/create' },
                status: { method: 'GET', path: '/video/query?id={{task_id}}' },
                response: { taskIdPath: '$.id', statusPath: '$.status' },
                polling: { intervalMs: 5000, timeoutMs: 600000, doneStates: ['completed'], failStates: ['failed'] },
              },
            },
          ],
        },
      }],
    }] as unknown as UIMessage[]
    const events = collectSavedEvents(messages)
    expect(events).toHaveLength(2)
    expect(events.map((item) => item.savedModelKey)).toEqual([
      'openai-compatible:oa-1::veo3-fast',
      'openai-compatible:oa-1::veo3.1-fast',
    ])
    expect(events[1]?.draftModel?.name).toBe('Veo 3.1 Fast')
  })
})
@@ -0,0 +1,65 @@
import { describe, expect, it } from 'vitest'
import { mergeProvidersForDisplay } from '@/app/[locale]/profile/components/api-config/hooks'
import type { Provider } from '@/app/[locale]/profile/components/api-config/types'
describe('useProviders provider order merge', () => {
  it('preserves saved providers order and appends missing presets at the end', () => {
    const presets: Provider[] = [
      { id: 'ark', name: '火山引擎 Ark' },
      { id: 'google', name: 'Google AI Studio' },
      { id: 'bailian', name: '阿里云百炼' },
    ]
    const saved: Provider[] = [
      { id: 'google', name: 'Google Legacy Name', apiKey: 'google-key', hidden: true },
      { id: 'openai-compatible:oa-2', name: 'OpenAI B', baseUrl: 'https://oa-b.test', apiKey: 'oa-key' },
      { id: 'ark', name: 'Ark Legacy Name', apiKey: 'ark-key' },
    ]
    const merged = mergeProvidersForDisplay(saved, presets)
    // Saved order first, then any preset not present in the saved list.
    const mergedIds = merged.map(({ id }) => id)
    expect(mergedIds).toEqual(['google', 'openai-compatible:oa-2', 'ark', 'bailian'])
    // Saved flags such as `hidden` must survive the merge.
    expect(merged[0]?.hidden).toBe(true)
  })

  it('uses preset localized names for preset providers while keeping apiKey/baseUrl from saved data', () => {
    const presets: Provider[] = [
      { id: 'google', name: 'Google AI Studio', baseUrl: 'https://google.default' },
    ]
    const saved: Provider[] = [
      { id: 'google', name: 'Google Old Name', baseUrl: 'https://google.custom', apiKey: 'google-key' },
    ]
    const merged = mergeProvidersForDisplay(saved, presets)
    expect(merged).toHaveLength(1)
    expect(merged[0]).toMatchObject({
      id: 'google',
      name: 'Google AI Studio',
      baseUrl: 'https://google.custom',
      apiKey: 'google-key',
      hasApiKey: true,
    })
  })

  it('uses preset official baseUrl for minimax even when saved payload contains a custom baseUrl', () => {
    const presets: Provider[] = [
      { id: 'minimax', name: 'MiniMax Hailuo', baseUrl: 'https://api.minimaxi.com/v1' },
    ]
    const saved: Provider[] = [
      { id: 'minimax', name: 'MiniMax Legacy', baseUrl: 'https://custom.minimax.proxy/v1', apiKey: 'mm-key' },
    ]
    const merged = mergeProvidersForDisplay(saved, presets)
    expect(merged).toHaveLength(1)
    // minimax always falls back to the preset's official endpoint.
    expect(merged[0]).toMatchObject({
      id: 'minimax',
      name: 'MiniMax Hailuo',
      baseUrl: 'https://api.minimaxi.com/v1',
      apiKey: 'mm-key',
      hasApiKey: true,
    })
  })
})
@@ -0,0 +1,99 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Hoisted so the hoisted vi.mock factory below can reference the same instance.
const prismaMock = vi.hoisted(() => ({
  $queryRaw: vi.fn(),
  $executeRaw: vi.fn(),
  $transaction: vi.fn(),
  locationImage: { createMany: vi.fn() },
  globalLocationImage: { createMany: vi.fn() },
}))
vi.mock('@/lib/prisma', () => ({
  prisma: prismaMock,
}))
describe('location-backed assets service', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // First raw query resolves one project asset row; the follow-up image
    // query resolves empty. Order matters: mockResolvedValueOnce is FIFO.
    prismaMock.$queryRaw
      .mockResolvedValueOnce([
        {
          id: 'location-1',
          novelPromotionProjectId: 'novel-project-1',
          name: 'Bronze Dagger',
          summary: 'Old bronze dagger',
          selectedImageId: null,
          sourceGlobalLocationId: null,
          assetKind: 'prop',
        },
      ])
      .mockResolvedValueOnce([])
  })
  it('queries project location-backed assets with real schema column names', async () => {
    const mod = await import('@/lib/assets/services/location-backed-assets')
    await mod.listProjectLocationBackedAssets('novel-project-1', 'prop')
    // The tagged-template query object may expose either `strings` (template
    // parts) or a prebuilt `sql` string; join/fall back to cover both shapes.
    const assetQuery = prismaMock.$queryRaw.mock.calls[0]?.[0] as { strings?: ReadonlyArray<string>; sql?: string }
    const imageQuery = prismaMock.$queryRaw.mock.calls[1]?.[0] as { strings?: ReadonlyArray<string>; sql?: string }
    const assetSql = assetQuery.strings?.join(' ') ?? assetQuery.sql ?? ''
    const imageSql = imageQuery.strings?.join(' ') ?? imageQuery.sql ?? ''
    expect(assetSql).toContain('FROM novel_promotion_locations')
    expect(assetSql).toContain('novelPromotionProjectId')
    // Guards against regressing to a non-existent `projectId` column.
    expect(assetSql).not.toContain('projectId')
    expect(imageSql).toContain('FROM location_images')
    expect(imageSql).toContain('NULL AS previousImageMediaId')
  })
  it('seeds an initial project image slot when creating a prop asset', async () => {
    const mod = await import('@/lib/assets/services/location-backed-assets')
    const result = await mod.createProjectLocationBackedAsset({
      novelPromotionProjectId: 'novel-project-1',
      name: 'Bronze Dagger',
      summary: 'Old bronze dagger',
      initialDescription: 'A bronze dagger with a carved handle and weathered blade',
      kind: 'prop',
    })
    // Creation must seed exactly one slot at imageIndex 0 for the new asset.
    expect(prismaMock.locationImage.createMany).toHaveBeenCalledWith({
      data: [
        {
          locationId: result.id,
          imageIndex: 0,
          description: 'A bronze dagger with a carved handle and weathered blade',
          availableSlots: '[]',
        },
      ],
    })
  })
  it('seeds multiple project image slots when explicit descriptions are provided', async () => {
    const mod = await import('@/lib/assets/services/location-backed-assets')
    await mod.seedProjectLocationBackedImageSlots({
      locationId: 'location-1',
      descriptions: ['Night street', 'Rainy alley'],
      fallbackDescription: 'Night street',
    })
    // One slot per description, indexed in order.
    expect(prismaMock.locationImage.createMany).toHaveBeenCalledWith({
      data: [
        {
          locationId: 'location-1',
          imageIndex: 0,
          description: 'Night street',
          availableSlots: '[]',
        },
        {
          locationId: 'location-1',
          imageIndex: 1,
          description: 'Rainy alley',
          availableSlots: '[]',
        },
      ],
    })
  })
})
@@ -0,0 +1,37 @@
import { describe, expect, it } from 'vitest'
import { canGenerateLocationBackedAsset, resolveLocationBackedGenerateType } from '@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/assets/location-backed-asset'
describe('location-backed asset generation rules', () => {
  it('requires props to have a visual description before generation', () => {
    // A prop with no seeded image slots is not generatable yet.
    const bareProp = {
      id: 'prop-1',
      name: '金箍棒',
      summary: '一根两头包裹金片的黑铁长棍',
      images: [],
    }
    expect(canGenerateLocationBackedAsset(bareProp, 'prop')).toBe(false)
  })

  it('allows locations to generate from seeded image descriptions', () => {
    // A location whose slot carries a description can be generated.
    const seededLocation = {
      id: 'location-1',
      name: '雨夜街道',
      summary: null,
      images: [
        {
          id: 'image-1',
          imageIndex: 0,
          description: '潮湿反光的老街',
          imageUrl: null,
          previousImageUrl: null,
          previousDescription: null,
          isSelected: false,
        },
      ],
    }
    expect(canGenerateLocationBackedAsset(seededLocation, 'location')).toBe(true)
  })

  it('routes prop generation through the prop branch', () => {
    // Each kind maps to its own generation branch, unchanged.
    for (const kind of ['prop', 'location'] as const) {
      expect(resolveLocationBackedGenerateType(kind)).toBe(kind)
    }
  })
})
+131
View File
@@ -0,0 +1,131 @@
import { describe, expect, it } from 'vitest'
import { mapGlobalVoiceToAsset, mapProjectCharacterToAsset, mapProjectPropToAsset } from '@/lib/assets/mappers'
import { groupAssetsByKind } from '@/lib/assets/grouping'
describe('asset mappers', () => {
  /**
   * Shared raw global-voice record. Two tests below map the identical record,
   * so it lives in one factory to keep the fixtures from drifting apart.
   * The return annotation ties the literal to the mapper's input type.
   */
  function buildGlobalVoiceRecord(): Parameters<typeof mapGlobalVoiceToAsset>[0] {
    return {
      id: 'voice-1',
      name: '旁白',
      description: '低沉稳重',
      voiceId: 'voice-provider-1',
      voiceType: 'designed',
      customVoiceUrl: 'https://example.com/voice.mp3',
      media: null,
      voicePrompt: '低沉稳重',
      gender: 'male',
      language: 'zh',
      folderId: 'folder-1',
    }
  }

  it('maps project characters into the unified character asset contract', () => {
    const asset = mapProjectCharacterToAsset({
      id: 'character-1',
      name: '林夏',
      introduction: '主角',
      profileData: JSON.stringify({ archetype: 'lead' }),
      voiceType: 'custom',
      voiceId: 'voice-1',
      customVoiceUrl: 'https://example.com/voice.mp3',
      media: null,
      profileConfirmed: true,
      appearances: [
        {
          id: 'appearance-1',
          appearanceIndex: 0,
          changeReason: '初始形象',
          description: '短发,风衣',
          imageUrl: 'https://example.com/char.jpg',
          media: null,
          imageUrls: ['https://example.com/char.jpg'],
          imageMedias: [],
          selectedIndex: 0,
          previousImageUrl: null,
          previousMedia: null,
          previousImageUrls: [],
          previousImageMedias: [],
        },
      ],
    })
    expect(asset).toEqual(expect.objectContaining({
      id: 'character-1',
      scope: 'project',
      kind: 'character',
      introduction: '主角',
      profileData: JSON.stringify({ archetype: 'lead' }),
      profileConfirmed: true,
      voice: expect.objectContaining({
        voiceType: 'custom',
        voiceId: 'voice-1',
      }),
    }))
    // Appearances become ordered variants; changeReason is surfaced as label.
    expect(asset.variants[0]).toEqual(expect.objectContaining({
      id: 'appearance-1',
      index: 0,
      label: '初始形象',
    }))
  })

  it('maps global voices into the unified audio asset contract', () => {
    const asset = mapGlobalVoiceToAsset(buildGlobalVoiceRecord())
    expect(asset).toEqual(expect.objectContaining({
      id: 'voice-1',
      scope: 'global',
      kind: 'voice',
      voiceMeta: expect.objectContaining({
        voiceType: 'designed',
        gender: 'male',
        language: 'zh',
      }),
    }))
  })

  it('maps project props into the unified visual asset contract and groups them by kind', () => {
    const propAsset = mapProjectPropToAsset({
      id: 'prop-1',
      name: '青铜匕首',
      summary: '古旧短刃,雕纹手柄',
      images: [
        {
          id: 'prop-image-1',
          imageIndex: 0,
          description: '古旧短刃,雕纹手柄',
          imageUrl: 'https://example.com/prop.jpg',
          media: null,
          previousImageUrl: null,
          previousMedia: null,
          isSelected: true,
        },
      ],
    })
    const voiceAsset = mapGlobalVoiceToAsset(buildGlobalVoiceRecord())
    expect(propAsset).toEqual(expect.objectContaining({
      id: 'prop-1',
      scope: 'project',
      kind: 'prop',
      summary: '古旧短刃,雕纹手柄',
      // The selected image becomes the asset's selected variant.
      selectedVariantId: 'prop-image-1',
    }))
    expect(propAsset.variants[0]).toEqual(expect.objectContaining({
      id: 'prop-image-1',
      index: 0,
      description: '古旧短刃,雕纹手柄',
    }))
    const groups = groupAssetsByKind([propAsset, voiceAsset])
    expect(groups.prop.map((asset) => asset.id)).toEqual(['prop-1'])
    expect(groups.voice.map((asset) => asset.id)).toEqual(['voice-1'])
  })
})
@@ -0,0 +1,127 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Hoisted spies so the vi.mock factories below may reference them before the
// mocked modules are evaluated.
const deleteObjectMock = vi.hoisted(() => vi.fn())
const resolveStorageKeyFromMediaValueMock = vi.hoisted(() => vi.fn())
// Minimal Prisma surface touched by the selection service under test.
const prismaMock = vi.hoisted(() => ({
  novelPromotionLocation: {
    findUnique: vi.fn(),
    update: vi.fn(),
  },
  locationImage: {
    update: vi.fn(),
    deleteMany: vi.fn(),
  },
  $transaction: vi.fn(),
}))

vi.mock('@/lib/prisma', () => ({
  prisma: prismaMock,
}))
vi.mock('@/lib/storage', () => ({
  deleteObject: deleteObjectMock,
}))
vi.mock('@/lib/media/service', () => ({
  resolveStorageKeyFromMediaValue: resolveStorageKeyFromMediaValueMock,
}))
describe('project location-backed selection service', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // Route the $transaction callback at the top-level mocks so assertions
    // below can observe calls made inside the transaction.
    prismaMock.$transaction.mockImplementation(async (
      callback: (tx: {
        locationImage: {
          update: typeof prismaMock.locationImage.update
          deleteMany: typeof prismaMock.locationImage.deleteMany
        }
        novelPromotionLocation: {
          update: typeof prismaMock.novelPromotionLocation.update
        }
      }) => Promise<void>,
    ) => callback({
      locationImage: prismaMock.locationImage,
      novelPromotionLocation: prismaMock.novelPromotionLocation,
    }))
    // Deterministic key derivation: "key:<media value>".
    resolveStorageKeyFromMediaValueMock.mockImplementation(async (value: string) => `key:${value}`)
    deleteObjectMock.mockResolvedValue(undefined)
    prismaMock.locationImage.deleteMany.mockResolvedValue({ count: 1 })
    prismaMock.locationImage.update.mockResolvedValue(undefined)
    prismaMock.novelPromotionLocation.update.mockResolvedValue(undefined)
  })

  it('confirms a prop selection by keeping only the selected render', async () => {
    prismaMock.novelPromotionLocation.findUnique.mockResolvedValue({
      id: 'prop-1',
      selectedImageId: 'prop-image-2',
      images: [
        {
          id: 'prop-image-1',
          imageIndex: 0,
          imageUrl: 'https://example.com/prop-1.png',
          isSelected: false,
        },
        {
          id: 'prop-image-2',
          imageIndex: 1,
          imageUrl: 'https://example.com/prop-2.png',
          isSelected: true,
        },
      ],
    })
    // Dynamic import so the hoisted vi.mock factories above are in effect.
    const mod = await import('@/lib/assets/services/project-location-backed-selection')
    const result = await mod.confirmProjectLocationBackedSelection('prop-1')
    expect(result).toEqual({ success: true })
    // The unselected render's storage object is resolved and deleted.
    expect(resolveStorageKeyFromMediaValueMock).toHaveBeenCalledWith('https://example.com/prop-1.png')
    expect(deleteObjectMock).toHaveBeenCalledWith('key:https://example.com/prop-1.png')
    expect(prismaMock.locationImage.deleteMany).toHaveBeenCalledWith({
      where: {
        locationId: 'prop-1',
        id: { not: 'prop-image-2' },
      },
    })
    // The survivor is compacted to index 0 and stays selected.
    expect(prismaMock.locationImage.update).toHaveBeenCalledWith({
      where: { id: 'prop-image-2' },
      data: {
        imageIndex: 0,
        isSelected: true,
      },
    })
    expect(prismaMock.novelPromotionLocation.update).toHaveBeenCalledWith({
      where: { id: 'prop-1' },
      data: { selectedImageId: 'prop-image-2' },
    })
  })

  it('fails explicitly when confirming without a selected prop render', async () => {
    prismaMock.novelPromotionLocation.findUnique.mockResolvedValue({
      id: 'prop-1',
      selectedImageId: null,
      images: [
        {
          id: 'prop-image-1',
          imageIndex: 0,
          imageUrl: 'https://example.com/prop-1.png',
          isSelected: false,
        },
        {
          id: 'prop-image-2',
          imageIndex: 1,
          imageUrl: 'https://example.com/prop-2.png',
          isSelected: false,
        },
      ],
    })
    const mod = await import('@/lib/assets/services/project-location-backed-selection')
    await expect(mod.confirmProjectLocationBackedSelection('prop-1')).rejects.toMatchObject({
      code: 'INVALID_PARAMS',
    })
    // No destructive side effect may run when the confirmation is rejected.
    expect(prismaMock.locationImage.deleteMany).not.toHaveBeenCalled()
    expect(deleteObjectMock).not.toHaveBeenCalled()
  })
})
+53
View File
@@ -0,0 +1,53 @@
import { describe, expect, it } from 'vitest'
import { buildPromptAssetContext, compileAssetPromptFragments } from '@/lib/assets/services/asset-prompt-context'
describe('asset prompt context', () => {
  it('compiles subject, environment, and prop prompt fragments from the centralized asset context', () => {
    // Build the shared context from raw character/location/prop records plus
    // the clip-level selections that scope which assets apply to this clip.
    const context = buildPromptAssetContext({
      characters: [
        {
          // Alias form "小雨/雨" — the clip below references the alias "雨".
          name: '小雨/雨',
          appearances: [
            {
              changeReason: '初始形象',
              descriptions: ['黑色短发,校服,冷静表情'],
              selectedIndex: 0,
              description: 'fallback description',
            },
          ],
        },
      ],
      locations: [
        {
          name: '天台',
          images: [
            {
              isSelected: true,
              description: '夜晚天台,冷风,霓虹远景',
              // availableSlots is stored as a JSON-encoded string array.
              availableSlots: JSON.stringify([
                '天台栏杆左侧靠近边缘的位置',
              ]),
            },
          ],
        },
      ],
      props: [
        {
          name: '青铜匕首',
          summary: '古旧短刃,雕纹手柄',
        },
      ],
      clipCharacters: [{ name: '雨' }],
      clipLocation: '天台',
      clipProps: ['青铜匕首'],
    })
    // The compiled fragments are the exact strings later spliced into
    // generation prompts, so they are asserted verbatim.
    expect(compileAssetPromptFragments(context)).toEqual({
      appearanceListText: '小雨/雨: ["初始形象"]',
      fullDescriptionText: '【小雨/雨 - 初始形象】黑色短发,校服,冷静表情',
      locationDescriptionText: '夜晚天台,冷风,霓虹远景\n\n可站位置:\n- 天台栏杆左侧靠近边缘的位置',
      propsDescriptionText: '【青铜匕首】古旧短刃,雕纹手柄',
      charactersIntroductionText: '暂无角色介绍',
    })
  })
})
+44
View File
@@ -0,0 +1,44 @@
import { describe, expect, it } from 'vitest'
import { assetKindRegistry, getAssetKindRegistration } from '@/lib/assets/kinds/registry'
describe('asset kind registry', () => {
  it('declares the supported asset kinds with stable capability contracts', () => {
    // Registry key order is part of the contract.
    expect(Object.keys(assetKindRegistry)).toEqual(['character', 'location', 'prop', 'voice'])

    // Expected partial registration per kind; only the listed keys are
    // asserted, via objectContaining.
    const expectedByKind = {
      character: {
        kind: 'character',
        family: 'visual',
        supportsMultipleVariants: true,
        supportsVoiceBinding: true,
        capabilities: expect.objectContaining({
          canGenerate: true,
          canBindVoice: true,
        }),
      },
      location: {
        kind: 'location',
        family: 'visual',
        supportsMultipleVariants: true,
        supportsVoiceBinding: false,
      },
      prop: {
        kind: 'prop',
        family: 'visual',
        supportsMultipleVariants: true,
        supportsVoiceBinding: false,
        capabilities: expect.objectContaining({
          canGenerate: true,
          canSelectRender: true,
          canCopyFromGlobal: true,
        }),
      },
      voice: {
        kind: 'voice',
        family: 'audio',
        supportsMultipleVariants: false,
        capabilities: expect.objectContaining({
          canGenerate: false,
          canSelectRender: false,
        }),
      },
    }

    for (const kind of ['character', 'location', 'prop', 'voice'] as const) {
      expect(getAssetKindRegistration(kind)).toEqual(
        expect.objectContaining(expectedByKind[kind]),
      )
    }
  })
})
@@ -0,0 +1,15 @@
import { describe, expect, it } from 'vitest'
import { getAssistantSkill, isAssistantId } from '@/lib/assistant-platform'
describe('assistant-platform registry', () => {
it('recognizes supported assistant ids', () => {
expect(isAssistantId('api-config-template')).toBe(true)
expect(isAssistantId('tutorial')).toBe(true)
expect(isAssistantId('unknown')).toBe(false)
})
it('returns registered skills', () => {
expect(getAssistantSkill('api-config-template').id).toBe('api-config-template')
expect(getAssistantSkill('tutorial').id).toBe('tutorial')
})
})
@@ -0,0 +1,46 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Hoisted so the vi.mock factory can close over it; defaults to a user whose
// analysis model is not configured (exercised by the second test below).
const getUserModelConfigMock = vi.hoisted(() =>
  vi.fn(async () => ({ analysisModel: null })),
)

vi.mock('@/lib/config-service', () => ({
  getUserModelConfig: getUserModelConfigMock,
}))
import { AssistantPlatformError } from '@/lib/assistant-platform'
import { createAssistantChatResponse } from '@/lib/assistant-platform/runtime'
describe('assistant-platform runtime', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })

  it('throws invalid request when messages payload is malformed', async () => {
    // `messages` must be an array of UI messages; an object should be
    // rejected before any model resolution happens.
    await expect(createAssistantChatResponse({
      userId: 'user-1',
      assistantId: 'api-config-template',
      context: {},
      messages: { invalid: true },
    })).rejects.toMatchObject({
      code: 'ASSISTANT_INVALID_REQUEST',
    } as Partial<AssistantPlatformError>)
  })

  it('throws missing model when analysisModel is not configured', async () => {
    // getUserModelConfigMock resolves { analysisModel: null } by default,
    // so a well-formed request still fails at model resolution.
    await expect(createAssistantChatResponse({
      userId: 'user-1',
      assistantId: 'api-config-template',
      context: {
        providerId: 'openai-compatible:oa-1',
      },
      messages: [{
        id: 'u1',
        role: 'user',
        parts: [{ type: 'text', text: 'hello' }],
      }],
    })).rejects.toMatchObject({
      code: 'ASSISTANT_MODEL_NOT_CONFIGURED',
    } as Partial<AssistantPlatformError>)
  })
})
@@ -0,0 +1,230 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type { AssistantRuntimeContext } from '@/lib/assistant-platform'
// Hoisted save spy: resolves with the composite model key that the skill
// reports back to the caller on a successful save.
const saveModelTemplateConfigurationMock = vi.hoisted(() =>
  vi.fn(async () => ({ modelKey: 'openai-compatible:oa-1::veo3.1' })),
)

vi.mock('@/lib/user-api/model-template/save', () => ({
  saveModelTemplateConfiguration: saveModelTemplateConfigurationMock,
}))
import { apiConfigTemplateSkill } from '@/lib/assistant-platform/skills/api-config-template'
/**
 * Fixture: runtime context for the api-config-template skill, pointing at a
 * fixed OpenAI-compatible provider and a resolved openrouter analysis model.
 */
function buildRuntimeContext(): AssistantRuntimeContext {
  const resolvedModel = {
    providerId: 'openrouter',
    providerKey: 'openrouter',
    modelId: 'gpt-5-mini',
  }
  return {
    userId: 'user-1',
    assistantId: 'api-config-template',
    context: { providerId: 'openai-compatible:oa-1' },
    analysisModelKey: 'openrouter::gpt-5-mini',
    resolvedModel,
  }
}
describe('assistant-platform api-config-template skill', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })

  /**
   * Resolves a tool's `execute` function from the skill, failing the test
   * early when the tool or its execute is missing. Extracted because all
   * three tests repeated the same lookup/guard boilerplate.
   */
  function getToolExecute<K extends 'saveModelTemplate' | 'saveModelTemplates'>(toolName: K) {
    const tools = apiConfigTemplateSkill.tools?.(buildRuntimeContext())
    expect(tools).toBeTruthy()
    const tool = tools?.[toolName]
    expect(tool).toBeTruthy()
    if (!tool?.execute) {
      throw new Error(`${String(toolName)}.execute is required for test`)
    }
    // Bind so `this` inside execute still refers to the tool object.
    return tool.execute.bind(tool)
  }

  it('returns invalid when template fails schema validation', async () => {
    const execute = getToolExecute('saveModelTemplate')
    // Async template without status/polling sections — presumably incomplete
    // per the template schema, so the save must be rejected up front.
    const result = await execute({
      modelId: 'veo3.1',
      name: 'Veo 3.1',
      type: 'video',
      compatMediaTemplate: {
        version: 1,
        mediaType: 'video',
        mode: 'async',
        create: {
          method: 'POST',
          path: '/v2/videos/generations',
        },
        response: {
          taskIdPath: '$.task_id',
        },
      },
    }, {} as never)
    expect(result.status).toBe('invalid')
    expect(result.code).toBe('MODEL_TEMPLATE_INVALID')
    expect(saveModelTemplateConfigurationMock).not.toHaveBeenCalled()
  })

  it('saves template when payload is valid', async () => {
    const execute = getToolExecute('saveModelTemplate')
    const result = await execute({
      modelId: 'veo3.1',
      name: 'Veo 3.1',
      type: 'video',
      compatMediaTemplate: {
        version: 1,
        mediaType: 'video',
        mode: 'async',
        create: {
          method: 'POST',
          path: '/v2/videos/generations',
          contentType: 'application/json',
          bodyTemplate: {
            model: '{{model}}',
            prompt: '{{prompt}}',
          },
        },
        status: {
          method: 'GET',
          path: '/v2/videos/generations/{{task_id}}',
        },
        response: {
          taskIdPath: '$.task_id',
          statusPath: '$.status',
          outputUrlPath: '$.video_url',
        },
        polling: {
          intervalMs: 3000,
          timeoutMs: 180000,
          doneStates: ['done'],
          failStates: ['failed'],
        },
      },
    }, {} as never)
    expect(result.status).toBe('saved')
    // The saved key comes from the mocked save service.
    expect(result.savedModelKey).toBe('openai-compatible:oa-1::veo3.1')
    expect(saveModelTemplateConfigurationMock).toHaveBeenCalledWith({
      userId: 'user-1',
      providerId: 'openai-compatible:oa-1',
      modelId: 'veo3.1',
      name: 'Veo 3.1',
      type: 'video',
      template: expect.objectContaining({
        mediaType: 'video',
      }),
      source: 'ai',
    })
  })

  it('saves multiple templates when batch payload is valid', async () => {
    const execute = getToolExecute('saveModelTemplates')
    const result = await execute({
      models: [
        {
          modelId: 'veo3-fast',
          name: 'Veo 3 Fast',
          type: 'video',
          compatMediaTemplate: {
            version: 1,
            mediaType: 'video',
            mode: 'async',
            create: {
              method: 'POST',
              path: '/video/create',
              contentType: 'application/json',
              bodyTemplate: {
                model: '{{model}}',
                prompt: '{{prompt}}',
                images: ['{{image}}'],
              },
            },
            status: {
              method: 'GET',
              path: '/video/query?id={{task_id}}',
            },
            response: {
              taskIdPath: '$.id',
              statusPath: '$.status',
              outputUrlPath: '$.video_url',
            },
            polling: {
              intervalMs: 5000,
              timeoutMs: 600000,
              doneStates: ['completed'],
              failStates: ['failed'],
            },
          },
        },
        {
          modelId: 'veo3.1-fast',
          name: 'Veo 3.1 Fast',
          type: 'video',
          compatMediaTemplate: {
            version: 1,
            mediaType: 'video',
            mode: 'async',
            create: {
              method: 'POST',
              path: '/video/create',
              contentType: 'application/json',
              bodyTemplate: {
                model: '{{model}}',
                prompt: '{{prompt}}',
                images: ['{{image}}'],
              },
            },
            status: {
              method: 'GET',
              path: '/video/query?id={{task_id}}',
            },
            response: {
              taskIdPath: '$.id',
              statusPath: '$.status',
              outputUrlPath: '$.video_url',
            },
            polling: {
              intervalMs: 5000,
              timeoutMs: 600000,
              doneStates: ['completed'],
              failStates: ['failed'],
            },
          },
        },
      ],
    }, {} as never)
    expect(result.status).toBe('saved')
    expect(result.savedModelKeys).toHaveLength(2)
    // One save call per model, in payload order.
    expect(saveModelTemplateConfigurationMock).toHaveBeenCalledTimes(2)
    expect(saveModelTemplateConfigurationMock).toHaveBeenNthCalledWith(1, expect.objectContaining({
      modelId: 'veo3-fast',
      name: 'Veo 3 Fast',
      providerId: 'openai-compatible:oa-1',
      userId: 'user-1',
      type: 'video',
      source: 'ai',
    }))
    expect(saveModelTemplateConfigurationMock).toHaveBeenNthCalledWith(2, expect.objectContaining({
      modelId: 'veo3.1-fast',
      name: 'Veo 3.1 Fast',
      providerId: 'openai-compatible:oa-1',
      userId: 'user-1',
      type: 'video',
      source: 'ai',
    }))
  })
})
@@ -0,0 +1,21 @@
import { describe, expect, it } from 'vitest'
import { renderAssistantSystemPrompt } from '@/lib/assistant-platform/system-prompts'
describe('assistant-platform system prompts', () => {
  it('loads api-config-template prompt from lib/prompts/skills and injects providerId', () => {
    const prompt = renderAssistantSystemPrompt('api-config-template', {
      providerId: 'openai-compatible:oa-1',
    })
    // Both the skill header and the substituted providerId must appear.
    const expectedFragments = [
      '你是 API 配置助手',
      '当前 providerId=openai-compatible:oa-1',
    ]
    for (const fragment of expectedFragments) {
      expect(prompt).toContain(fragment)
    }
    // The raw placeholder must have been fully substituted away.
    expect(prompt).not.toContain('{{providerId}}')
  })

  it('loads tutorial prompt from lib/prompts/skills', () => {
    const prompt = renderAssistantSystemPrompt('tutorial')
    for (const fragment of ['你是产品教程助手', '禁止编造不存在的页面']) {
      expect(prompt).toContain(fragment)
    }
  })
})
+166
View File
@@ -0,0 +1,166 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
const getProviderConfigMock = vi.hoisted(() => vi.fn(async () => ({
id: 'openai-compatible:oa-1',
apiKey: 'sk-test',
baseUrl: 'https://compat.example.com/v1',
})))
const getUserModelsMock = vi.hoisted(() =>
vi.fn<typeof import('@/lib/api-config').getUserModels>(async () => []),
)
vi.mock('@/lib/api-config', () => ({
getProviderConfig: getProviderConfigMock,
getUserModels: getUserModelsMock,
}))
import { pollAsyncTask } from '@/lib/async-poll'
/**
 * Base64url-encodes a UTF-8 string; mirrors how OCOMPAT task tokens embed
 * provider and model identifiers.
 */
function encode(value: string): string {
  const utf8Bytes = Buffer.from(value, 'utf8')
  return utf8Bytes.toString('base64url')
}
describe('async poll ocompat', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // NOTE(review): globalThis.fetch is replaced here and never restored
    // (no afterEach / vi.unstubAllGlobals), so the stub leaks to later test
    // files in the same worker — consider vi.stubGlobal instead. TODO confirm.
    globalThis.fetch = vi.fn() as unknown as typeof fetch
  })

  it('returns completed with output url when async status reaches done', async () => {
    getUserModelsMock.mockResolvedValueOnce([
      {
        modelKey: 'openai-compatible:oa-1::veo3.1',
        modelId: 'veo3.1',
        name: 'Veo 3.1',
        type: 'video',
        provider: 'openai-compatible:oa-1',
        price: 0,
        compatMediaTemplate: {
          version: 1,
          mediaType: 'video',
          mode: 'async',
          create: { method: 'POST', path: '/v2/videos/generations' },
          status: { method: 'GET', path: '/v2/videos/generations/{{task_id}}' },
          response: {
            statusPath: '$.status',
            outputUrlPath: '$.video_url',
          },
          polling: {
            intervalMs: 3000,
            timeoutMs: 180000,
            doneStates: ['succeeded'],
            failStates: ['failed'],
          },
        },
      },
    ])
    // Upstream status endpoint reports a terminal "succeeded" state.
    const fetchMock = vi.fn(async () => new Response(JSON.stringify({
      status: 'succeeded',
      video_url: 'https://cdn.test/video.mp4',
    }), { status: 200 }))
    globalThis.fetch = fetchMock as unknown as typeof fetch
    // Token shape: OCOMPAT:VIDEO:<b64url provider>:<b64url modelKey>:<taskId>.
    const result = await pollAsyncTask(
      `OCOMPAT:VIDEO:${encode('openai-compatible:oa-1')}:${encode('openai-compatible:oa-1::veo3.1')}:task_1`,
      'user-1',
    )
    expect(result).toEqual({
      status: 'completed',
      resultUrl: 'https://cdn.test/video.mp4',
      videoUrl: 'https://cdn.test/video.mp4',
    })
  })

  it('uses content endpoint when output url is missing', async () => {
    getUserModelsMock.mockResolvedValueOnce([
      {
        modelKey: 'openai-compatible:oa-1::veo3.1',
        modelId: 'veo3.1',
        name: 'Veo 3.1',
        type: 'video',
        provider: 'openai-compatible:oa-1',
        price: 0,
        compatMediaTemplate: {
          version: 1,
          mediaType: 'video',
          mode: 'async',
          create: { method: 'POST', path: '/v2/videos/generations' },
          status: { method: 'GET', path: '/v2/videos/generations/{{task_id}}' },
          // Template declares a content endpoint; no outputUrlPath below, so
          // the poller must fall back to this download URL.
          content: { method: 'GET', path: '/v2/videos/generations/{{task_id}}/content' },
          response: {
            statusPath: '$.status',
          },
          polling: {
            intervalMs: 3000,
            timeoutMs: 180000,
            doneStates: ['succeeded'],
            failStates: ['failed'],
          },
        },
      },
    ])
    const fetchMock = vi.fn(async () => new Response(JSON.stringify({
      status: 'succeeded',
    }), { status: 200 }))
    globalThis.fetch = fetchMock as unknown as typeof fetch
    const result = await pollAsyncTask(
      `OCOMPAT:VIDEO:${encode('openai-compatible:oa-1')}:${encode('openai-compatible:oa-1::veo3.1')}:task_2`,
      'user-1',
    )
    expect(result.status).toBe('completed')
    // Download URL is built from the provider baseUrl + content path, and the
    // provider API key is forwarded as download auth.
    expect(result.videoUrl).toBe('https://compat.example.com/v1/v2/videos/generations/task_2/content')
    expect(result.downloadHeaders).toEqual({
      Authorization: 'Bearer sk-test',
    })
  })

  it('accepts compact OCOMPAT token encoded from modelId', async () => {
    const providerUuid = '33331fb0-2806-4da6-85ff-cd2433b587d0'
    getUserModelsMock.mockResolvedValueOnce([
      {
        modelKey: `openai-compatible:${providerUuid}::veo3.1-fast`,
        modelId: 'veo3.1-fast',
        name: 'Veo 3.1 Fast',
        type: 'video',
        provider: `openai-compatible:${providerUuid}`,
        price: 0,
        compatMediaTemplate: {
          version: 1,
          mediaType: 'video',
          mode: 'async',
          create: { method: 'POST', path: '/video/create' },
          status: { method: 'GET', path: '/video/query?id={{task_id}}' },
          response: {
            statusPath: '$.status',
            outputUrlPath: '$.video_url',
          },
          polling: {
            intervalMs: 3000,
            timeoutMs: 180000,
            doneStates: ['completed'],
            failStates: ['failed'],
          },
        },
      },
    ])
    const fetchMock = vi.fn(async () => new Response(JSON.stringify({
      status: 'completed',
      video_url: 'https://cdn.test/video-fast.mp4',
    }), { status: 200 }))
    globalThis.fetch = fetchMock as unknown as typeof fetch
    // Compact form: "u_<providerUuid>" plus base64url of the bare modelId.
    const result = await pollAsyncTask(
      `OCOMPAT:VIDEO:u_${providerUuid}:${encode('veo3.1-fast')}:task_3`,
      'user-1',
    )
    expect(result).toEqual({
      status: 'completed',
      resultUrl: 'https://cdn.test/video-fast.mp4',
      videoUrl: 'https://cdn.test/video-fast.mp4',
    })
  })
})
@@ -0,0 +1,65 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Hoisted pricing-lookup spy so the vi.mock factory below can reference it.
const lookupMock = vi.hoisted(() => ({
  resolveBuiltinPricing: vi.fn(),
}))

vi.mock('@/lib/model-pricing/lookup', () => ({
  resolveBuiltinPricing: lookupMock.resolveBuiltinPricing,
}))
import { calcImage, calcText, calcVideo, calcVoice } from '@/lib/billing/cost'
describe('billing/cost error branches', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })

  it('throws ambiguous pricing error when catalog has multiple candidates', () => {
    // Two providers list the same bare modelId, so a bare lookup cannot pick one.
    lookupMock.resolveBuiltinPricing.mockReturnValue({
      status: 'ambiguous_model',
      apiType: 'image',
      modelId: 'shared-model',
      candidates: [
        {
          apiType: 'image',
          provider: 'p1',
          modelId: 'shared-model',
          pricing: { mode: 'flat', flatAmount: 1 },
        },
        {
          apiType: 'image',
          provider: 'p2',
          modelId: 'shared-model',
          pricing: { mode: 'flat', flatAmount: 1 },
        },
      ],
    })
    expect(() => calcImage('shared-model', 1)).toThrow('Ambiguous image pricing modelId')
  })

  it('throws unknown model when catalog returns not_configured', () => {
    lookupMock.resolveBuiltinPricing.mockReturnValue({
      status: 'not_configured',
    })
    expect(() => calcImage('provider::missing-image-model', 1)).toThrow('Unknown image model pricing')
  })

  it('normalizes invalid numeric inputs to zero before pricing', () => {
    // Resolve per-token-type prices: input=2/M, output=4/M, anything else 3.
    lookupMock.resolveBuiltinPricing.mockImplementation(
      (input: { selections?: { tokenType?: 'input' | 'output' } }) => {
        if (input.selections?.tokenType === 'input') return { status: 'resolved', amount: 2 }
        if (input.selections?.tokenType === 'output') return { status: 'resolved', amount: 4 }
        return { status: 'resolved', amount: 3 }
      },
    )
    // NaN on either side contributes nothing; the other side still bills.
    expect(calcText('text-model', Number.NaN, 1_000_000)).toBeCloseTo(4, 8)
    expect(calcText('text-model', 1_000_000, Number.NaN)).toBeCloseTo(2, 8)
    expect(calcImage('image-model', Number.NaN)).toBe(0)
    expect(calcVideo('video-model', '720p', Number.NaN)).toBe(0)
    expect(calcVoice(Number.NaN)).toBe(0)
  })
})
+240
View File
@@ -0,0 +1,240 @@
import { describe, expect, it } from 'vitest'
import {
USD_TO_CNY,
calcImage,
calcLipSync,
calcText,
calcVideo,
calcVideoByTokens,
calcVoice,
calcVoiceDesign,
} from '@/lib/billing/cost'
// Integration-style pricing tests against the real builtin price catalog.
// Expected amounts are pinned literals; if the catalog changes, these tests
// are intended to fail.
describe('billing/cost', () => {
  it('calculates text cost by known model price table', () => {
    // 1M input + 1M output tokens; USD-per-1M prices converted to CNY.
    const cost = calcText('anthropic/claude-sonnet-4', 1_000_000, 1_000_000)
    expect(cost).toBeCloseTo((3 + 15) * USD_TO_CNY, 8)
  })

  it('throws when text model pricing is unknown', () => {
    expect(() => calcText('unknown-model', 500_000, 250_000)).toThrow('Unknown text model pricing')
  })

  it('throws when image model pricing is unknown', () => {
    expect(() => calcImage('missing-image-model', 3)).toThrow('Unknown image model pricing')
  })

  it('supports resolution-aware video pricing', () => {
    const cost720 = calcVideo('doubao-seedance-1-0-pro-fast-251015', '720p', 2)
    const cost1080 = calcVideo('doubao-seedance-1-0-pro-fast-251015', '1080p', 2)
    expect(cost720).toBeCloseTo(0.86, 8)
    expect(cost1080).toBeCloseTo(2.06, 8)
    // Unsupported resolution and unknown model each fail with distinct errors.
    expect(() => calcVideo('doubao-seedance-1-0-pro-fast-251015', '2k', 1)).toThrow('Unsupported video resolution pricing')
    expect(() => calcVideo('unknown-video-model', '720p', 1)).toThrow('Unknown video model pricing')
  })

  it('scales ark video pricing by selected duration when tiers omit duration', () => {
    const shortDuration = calcVideo('doubao-seedance-1-0-pro-250528', '480p', 1, {
      generationMode: 'normal',
      resolution: '480p',
      duration: 2,
    })
    const longDuration = calcVideo('doubao-seedance-1-0-pro-250528', '1080p', 1, {
      generationMode: 'normal',
      resolution: '1080p',
      duration: 12,
    })
    expect(shortDuration).toBeCloseTo(0.292, 8)
    expect(longDuration).toBeCloseTo(8.808, 8)
  })

  it('uses Ark 1.5 official default generateAudio=true when audio is omitted', () => {
    // Omitting generateAudio must price like audio-on, not audio-off.
    const defaultAudio = calcVideo('doubao-seedance-1-5-pro-251215', '720p', 1, {
      generationMode: 'normal',
      resolution: '720p',
    })
    const muteAudio = calcVideo('doubao-seedance-1-5-pro-251215', '720p', 1, {
      generationMode: 'normal',
      resolution: '720p',
      generateAudio: false,
    })
    expect(defaultAudio).toBeCloseTo(1.73, 8)
    expect(muteAudio).toBeCloseTo(0.86, 8)
  })

  it('supports Ark Seedance 1.0 Lite i2v pricing and duration scaling', () => {
    const shortDuration = calcVideo('doubao-seedance-1-0-lite-i2v-250428', '480p', 1, {
      generationMode: 'normal',
      resolution: '480p',
      duration: 2,
    })
    const longDuration = calcVideo('doubao-seedance-1-0-lite-i2v-250428', '1080p', 1, {
      generationMode: 'firstlastframe',
      resolution: '1080p',
      duration: 12,
    })
    expect(shortDuration).toBeCloseTo(0.196, 8)
    expect(longDuration).toBeCloseTo(5.88, 8)
  })

  it('rejects unsupported Ark capability values before pricing', () => {
    // duration: 1 is presumably outside this model's supported range.
    expect(() => calcVideo('doubao-seedance-1-0-lite-i2v-250428', '720p', 1, {
      generationMode: 'normal',
      resolution: '720p',
      duration: 1,
    })).toThrow('Unsupported video capability pricing')
  })

  it('estimates Seedance 2.0 video pricing from official token formula', () => {
    const cost = calcVideo('doubao-seedance-2-0-260128', '720p', 1, {
      resolution: '720p',
      duration: 5,
      aspectRatio: '16:9',
      containsVideoInput: false,
    })
    expect(cost).toBeCloseTo(4.968, 8)
  })

  it('applies Seedance 2.0 video-input token floor for quoted pricing', () => {
    const cost = calcVideo('doubao-seedance-2-0-fast-260128', '720p', 1, {
      resolution: '720p',
      duration: 5,
      aspectRatio: '16:9',
      containsVideoInput: true,
      inputVideoSeconds: 2,
    })
    expect(cost).toBeCloseTo(4.2768, 8)
  })

  it('settles Seedance 2.0 videos from exact usage tokens', () => {
    // Token-based settlement path, distinct from the quoted estimate above.
    const cost = calcVideoByTokens('doubao-seedance-2-0-260128', 120_000, {
      containsVideoInput: false,
    })
    expect(cost).toBeCloseTo(5.52, 8)
  })

  it('supports minimax capability-aware video pricing', () => {
    const hailuoNormal = calcVideo('minimax-hailuo-2.3', '768p', 1, {
      generationMode: 'normal',
      resolution: '768p',
      duration: 6,
    })
    const hailuoFirstLast = calcVideo('minimax-hailuo-02', '768p', 1, {
      generationMode: 'firstlastframe',
      resolution: '768p',
      duration: 10,
    })
    const t2v = calcVideo('t2v-01', '720p', 1, {
      generationMode: 'normal',
      resolution: '720p',
      duration: 6,
    })
    expect(hailuoNormal).toBeCloseTo(2.0, 8)
    expect(hailuoFirstLast).toBeCloseTo(4.0, 8)
    expect(t2v).toBeCloseTo(3.0, 8)
    expect(() => calcVideo('minimax-hailuo-02', '512p', 1, {
      generationMode: 'firstlastframe',
      resolution: '512p',
      duration: 6,
    })).toThrow('Unsupported video capability pricing')
  })

  it('prefers builtin image pricing over custom pricing when builtin exists', () => {
    // Custom pricing must not override a catalog entry for the same model.
    const builtin = calcImage('banana', 1)
    const withCustom = calcImage('banana', 1, undefined, {
      image: {
        basePrice: 99,
      },
    })
    expect(withCustom).toBeCloseTo(builtin, 8)
  })

  it('uses custom image option pricing for unknown models', () => {
    const cost = calcImage(
      'openai-compatible:oa-1::gpt-image-1',
      2,
      {
        resolution: '1024x1024',
        quality: 'high',
      },
      {
        image: {
          basePrice: 0.2,
          optionPrices: {
            resolution: {
              '1024x1024': 0.05,
            },
            quality: {
              high: 0.1,
            },
          },
        },
      },
    )
    // (base + each selected option surcharge) * count
    expect(cost).toBeCloseTo((0.2 + 0.05 + 0.1) * 2, 8)
  })

  it('uses custom video option pricing for unknown models', () => {
    const cost = calcVideo(
      'openai-compatible:oa-1::sora-2',
      '720p',
      1,
      {
        resolution: '720x1280',
        duration: 8,
      },
      {
        video: {
          basePrice: 0.8,
          optionPrices: {
            resolution: {
              '720x1280': 0.2,
            },
            duration: {
              '8': 0.4,
            },
          },
        },
      },
    )
    expect(cost).toBeCloseTo(1.4, 8)
  })

  it('fails explicitly when selected custom option price is missing', () => {
    // Selected resolution has no entry in optionPrices → hard error, not 0.
    expect(() => calcVideo(
      'openai-compatible:oa-1::sora-2',
      '720p',
      1,
      {
        resolution: '1792x1024',
      },
      {
        video: {
          optionPrices: {
            resolution: {
              '720x1280': 0.2,
            },
          },
        },
      },
    )).toThrow('No custom video price matched')
  })

  it('returns deterministic fixed costs for call-based APIs', () => {
    expect(calcVoiceDesign()).toBeGreaterThan(0)
    expect(calcLipSync()).toBeGreaterThan(0)
    expect(calcLipSync('vidu::vidu-lipsync')).toBeGreaterThan(0)
    expect(calcLipSync('bailian::videoretalk')).toBeGreaterThan(0)
  })

  it('calculates voice costs from quantities', () => {
    expect(calcVoice(30)).toBeGreaterThan(0)
  })
})
+135
View File
@@ -0,0 +1,135 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Only $transaction is stubbed: every ledger write happens inside it.
const prismaMock = vi.hoisted(() => ({
  $transaction: vi.fn(),
}))

vi.mock('@/lib/prisma', () => ({
  prisma: prismaMock,
}))
// Silence structured logging so assertions don't depend on logger behavior.
vi.mock('@/lib/logging/core', () => ({
  logInfo: vi.fn(),
  logError: vi.fn(),
}))
import { addBalance, recordShadowUsage } from '@/lib/billing/ledger'
/**
 * Fresh per-test stub of the transaction-client surface the ledger touches:
 * balance upserts and transaction-row lookup/creation.
 */
function buildTxStub() {
  const userBalance = { upsert: vi.fn() }
  const balanceTransaction = { findFirst: vi.fn(), create: vi.fn() }
  return { userBalance, balanceTransaction }
}
describe('billing/ledger extra', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })

  it('returns false when addBalance amount is invalid', async () => {
    // Zero/invalid amounts short-circuit before touching the database.
    const result = await addBalance('u1', 0)
    expect(result).toBe(false)
    expect(prismaMock.$transaction).not.toHaveBeenCalled()
  })

  it('adds recharge balance with string reason', async () => {
    const tx = buildTxStub()
    tx.userBalance.upsert.mockResolvedValue({ balance: 8.5 })
    prismaMock.$transaction.mockImplementation(async (callback: (ctx: typeof tx) => Promise<void>) => {
      await callback(tx)
    })
    const result = await addBalance('u1', 5, 'manual recharge')
    expect(result).toBe(true)
    // String reason implies no idempotency key, so no duplicate lookup runs.
    expect(tx.balanceTransaction.findFirst).not.toHaveBeenCalled()
    expect(tx.userBalance.upsert).toHaveBeenCalledTimes(1)
    expect(tx.balanceTransaction.create).toHaveBeenCalledWith(expect.objectContaining({
      data: expect.objectContaining({
        userId: 'u1',
        type: 'recharge',
        amount: 5,
      }),
    }))
  })

  it('supports idempotent addBalance and short-circuits duplicate key', async () => {
    const tx = buildTxStub()
    // An existing row for the idempotency key means the credit already ran.
    tx.balanceTransaction.findFirst.mockResolvedValue({ id: 'existing_tx' })
    prismaMock.$transaction.mockImplementation(async (callback: (ctx: typeof tx) => Promise<void>) => {
      await callback(tx)
    })
    const result = await addBalance('u1', 3, {
      type: 'adjust',
      reason: 'admin adjust',
      idempotencyKey: 'idem_1',
      operatorId: 'op_1',
      externalOrderId: 'order_1',
    })
    // Duplicate is reported as success, but no balance or row is written.
    expect(result).toBe(true)
    expect(tx.balanceTransaction.findFirst).toHaveBeenCalledTimes(1)
    expect(tx.userBalance.upsert).not.toHaveBeenCalled()
    expect(tx.balanceTransaction.create).not.toHaveBeenCalled()
  })

  it('returns false when transaction throws in addBalance', async () => {
    prismaMock.$transaction.mockRejectedValue(new Error('db error'))
    const result = await addBalance('u1', 2, 'x')
    expect(result).toBe(false)
  })

  it('records shadow usage consume log on success', async () => {
    const tx = buildTxStub()
    tx.userBalance.upsert.mockResolvedValue({ balance: 11.2 })
    prismaMock.$transaction.mockImplementation(async (callback: (ctx: typeof tx) => Promise<void>) => {
      await callback(tx)
    })
    const result = await recordShadowUsage('u1', {
      projectId: 'p1',
      action: 'analyze',
      apiType: 'text',
      model: 'anthropic/claude-sonnet-4',
      quantity: 1000,
      unit: 'token',
      cost: 0.25,
      metadata: { trace: 'abc' },
    })
    expect(result).toBe(true)
    // Shadow usage is recorded with a zero-amount ledger entry.
    expect(tx.balanceTransaction.create).toHaveBeenCalledWith(expect.objectContaining({
      data: expect.objectContaining({
        userId: 'u1',
        type: 'shadow_consume',
        amount: 0,
      }),
    }))
  })

  it('returns false when recordShadowUsage transaction fails', async () => {
    prismaMock.$transaction.mockRejectedValue(new Error('shadow failed'))
    const result = await recordShadowUsage('u1', {
      projectId: 'p1',
      action: 'analyze',
      apiType: 'text',
      model: 'anthropic/claude-sonnet-4',
      quantity: 1000,
      unit: 'token',
      cost: 0.25,
    })
    expect(result).toBe(false)
  })
})
+22
View File
@@ -0,0 +1,22 @@
import { afterEach, describe, expect, it } from 'vitest'
import { getBillingMode, getBootBillingEnabled } from '@/lib/billing/mode'

// Every case mutates process.env.BILLING_MODE. Snapshot the original value
// once and restore it after each test, so the mutation cannot leak into
// other test files sharing this worker process (the original version left
// BILLING_MODE='invalid' behind after the suite finished).
const originalBillingMode = process.env.BILLING_MODE

describe('billing/mode', () => {
  afterEach(() => {
    if (originalBillingMode === undefined) {
      delete process.env.BILLING_MODE
    } else {
      process.env.BILLING_MODE = originalBillingMode
    }
  })

  it('falls back to OFF when env is missing', async () => {
    delete process.env.BILLING_MODE
    await expect(getBillingMode()).resolves.toBe('OFF')
    expect(getBootBillingEnabled()).toBe(false)
  })

  it('normalizes lower-case env mode', async () => {
    process.env.BILLING_MODE = 'enforce'
    await expect(getBillingMode()).resolves.toBe('ENFORCE')
    expect(getBootBillingEnabled()).toBe(true)
  })

  it('falls back to OFF when env mode is invalid', async () => {
    process.env.BILLING_MODE = 'invalid'
    await expect(getBillingMode()).resolves.toBe('OFF')
    expect(getBootBillingEnabled()).toBe(false)
  })
})
+79
View File
@@ -0,0 +1,79 @@
import { AsyncLocalStorage } from 'node:async_hooks'
import { describe, expect, it, vi } from 'vitest'
import { recordTextUsage, withTextUsageCollection } from '@/lib/billing/runtime-usage'

describe('billing/runtime-usage', () => {
  it('ignores records outside of collection scope', () => {
    // Recording with no active collection scope must be a silent no-op.
    expect(() => {
      recordTextUsage({
        model: 'm',
        inputTokens: 10,
        outputTokens: 20,
      })
    }).not.toThrow()
  })

  it('collects and normalizes token usage', async () => {
    const { textUsage } = await withTextUsageCollection(async () => {
      recordTextUsage({
        model: 'test-model',
        inputTokens: 10.9, // fractional count — expected to be truncated
        outputTokens: -2, // negative count — expected to be clamped to 0
      })
      return { ok: true }
    })
    expect(textUsage).toEqual([
      {
        model: 'test-model',
        inputTokens: 10,
        outputTokens: 0,
      },
    ])
  })

  it('falls back to empty usage when store is unavailable at read time', async () => {
    const getStoreSpy = vi.spyOn(AsyncLocalStorage.prototype, 'getStore')
    getStoreSpy.mockReturnValueOnce(undefined as never)
    try {
      const payload = await withTextUsageCollection(async () => ({ ok: true }))
      expect(payload).toEqual({ result: { ok: true }, textUsage: [] })
    } finally {
      // Restore even when the expectation above throws; a leaked spy on
      // AsyncLocalStorage.prototype would corrupt every later test that
      // touches async-local state. (The original only restored on success.)
      getStoreSpy.mockRestore()
    }
  })

  it('normalizes NaN and zero token values to zero', async () => {
    const { textUsage } = await withTextUsageCollection(async () => {
      recordTextUsage({
        model: 'nan-model',
        inputTokens: Number.NaN,
        outputTokens: 0,
      })
      return { ok: true }
    })
    expect(textUsage).toEqual([
      {
        model: 'nan-model',
        inputTokens: 0,
        outputTokens: 0,
      },
    ])
  })

  it('isolates concurrent async local storage contexts', async () => {
    // Two overlapping collections must not see each other's records.
    const [left, right] = await Promise.all([
      withTextUsageCollection(async () => {
        recordTextUsage({ model: 'left', inputTokens: 1, outputTokens: 2 })
        return 'left'
      }),
      withTextUsageCollection(async () => {
        recordTextUsage({ model: 'right', inputTokens: 3, outputTokens: 4 })
        return 'right'
      }),
    ])
    expect(left.textUsage).toEqual([{ model: 'left', inputTokens: 1, outputTokens: 2 }])
    expect(right.textUsage).toEqual([{ model: 'right', inputTokens: 3, outputTokens: 4 }])
  })
})
+565
View File
@@ -0,0 +1,565 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { calcText, calcVideo, calcVoice } from '@/lib/billing/cost'
import type { TaskBillingInfo } from '@/lib/task/types'
// vi.hoisted ensures these mock objects exist before the hoisted vi.mock
// factories below execute — vi.mock calls are lifted to the top of the module.
const ledgerMock = vi.hoisted(() => ({
confirmChargeWithRecord: vi.fn(),
freezeBalance: vi.fn(),
getBalance: vi.fn(),
getFreezeByIdempotencyKey: vi.fn(),
increasePendingFreezeAmount: vi.fn(),
recordShadowUsage: vi.fn(),
rollbackFreeze: vi.fn(),
}))
const modeMock = vi.hoisted(() => ({
getBillingMode: vi.fn(),
}))
vi.mock('@/lib/billing/ledger', () => ledgerMock)
vi.mock('@/lib/billing/mode', () => modeMock)
// Imported after the mocks so the service module binds to the mocked
// ledger/mode implementations.
import { BillingOperationError, InsufficientBalanceError } from '@/lib/billing/errors'
import {
handleBillingError,
prepareTaskBilling,
rollbackTaskBilling,
settleTaskBilling,
withTextBilling,
withVoiceBilling,
} from '@/lib/billing/service'
// Exercises the billing service across OFF / SHADOW / ENFORCE modes for both
// the synchronous with*Billing wrappers and the async task lifecycle helpers.
describe('billing/service', () => {
// Reset all mocks and reinstall "happy path" ledger defaults before each test;
// individual tests override with mockResolvedValueOnce / mockRejectedValueOnce.
beforeEach(() => {
vi.clearAllMocks()
ledgerMock.confirmChargeWithRecord.mockResolvedValue(true)
ledgerMock.freezeBalance.mockResolvedValue('freeze_1')
ledgerMock.getBalance.mockResolvedValue({ balance: 0 })
ledgerMock.getFreezeByIdempotencyKey.mockResolvedValue(null)
ledgerMock.increasePendingFreezeAmount.mockResolvedValue(true)
ledgerMock.recordShadowUsage.mockResolvedValue(true)
ledgerMock.rollbackFreeze.mockResolvedValue(true)
})
// OFF mode: pure passthrough, no ledger interaction at all.
it('returns raw execution result in OFF mode', async () => {
modeMock.getBillingMode.mockResolvedValue('OFF')
const result = await withTextBilling(
'u1',
'anthropic/claude-sonnet-4',
1000,
1000,
{ projectId: 'p1', action: 'a1' },
async () => ({ ok: true }),
)
expect(result).toEqual({ ok: true })
expect(ledgerMock.freezeBalance).not.toHaveBeenCalled()
expect(ledgerMock.confirmChargeWithRecord).not.toHaveBeenCalled()
})
// SHADOW mode: usage is logged but no balance is frozen.
it('records shadow usage in SHADOW mode without freezing', async () => {
modeMock.getBillingMode.mockResolvedValue('SHADOW')
const result = await withTextBilling(
'u1',
'anthropic/claude-sonnet-4',
1000,
1000,
{ projectId: 'p1', action: 'a1' },
async () => ({ ok: true }),
)
expect(result).toEqual({ ok: true })
expect(ledgerMock.freezeBalance).not.toHaveBeenCalled()
expect(ledgerMock.recordShadowUsage).toHaveBeenCalledTimes(1)
})
it('throws InsufficientBalanceError when ENFORCE freeze fails', async () => {
modeMock.getBillingMode.mockResolvedValue('ENFORCE')
ledgerMock.freezeBalance.mockResolvedValue(null)
ledgerMock.getBalance.mockResolvedValue({ balance: 0.01 })
await expect(
withTextBilling(
'u1',
'anthropic/claude-sonnet-4',
1000,
1000,
{ projectId: 'p1', action: 'a1' },
async () => ({ ok: true }),
),
).rejects.toBeInstanceOf(InsufficientBalanceError)
})
// The freeze created before execution must be released if execution throws.
it('rolls back freeze when execution throws', async () => {
modeMock.getBillingMode.mockResolvedValue('ENFORCE')
ledgerMock.freezeBalance.mockResolvedValue('freeze_rollback')
await expect(
withTextBilling(
'u1',
'anthropic/claude-sonnet-4',
1000,
1000,
{ projectId: 'p1', action: 'a1' },
async () => {
throw new Error('boom')
},
),
).rejects.toThrow('boom')
expect(ledgerMock.rollbackFreeze).toHaveBeenCalledWith('freeze_rollback')
})
// Quoted 5s vs actual 50s: the freeze must be expanded and the actual
// duration charged (3rd positional arg of confirmChargeWithRecord).
it('expands freeze and charges actual voice usage when actual exceeds quoted', async () => {
modeMock.getBillingMode.mockResolvedValue('ENFORCE')
ledgerMock.freezeBalance.mockResolvedValue('freeze_voice')
await withVoiceBilling(
'u1',
5,
{ projectId: 'p1', action: 'voice_gen' },
async () => ({ actualDurationSeconds: 50 }),
)
const confirmCall = ledgerMock.confirmChargeWithRecord.mock.calls.at(-1)
expect(confirmCall).toBeTruthy()
const chargedAmount = confirmCall?.[2]?.chargedAmount as number
expect(ledgerMock.increasePendingFreezeAmount).toHaveBeenCalledTimes(1)
expect(chargedAmount).toBeCloseTo(calcVoice(50), 8)
})
it('fails and rolls back when overage freeze expansion cannot be covered', async () => {
modeMock.getBillingMode.mockResolvedValue('ENFORCE')
ledgerMock.freezeBalance.mockResolvedValue('freeze_voice_low_balance')
ledgerMock.increasePendingFreezeAmount.mockResolvedValue(false)
ledgerMock.getBalance.mockResolvedValue({ balance: 0.001 })
await expect(
withVoiceBilling(
'u1',
5,
{ projectId: 'p1', action: 'voice_gen' },
async () => ({ actualDurationSeconds: 50 }),
),
).rejects.toBeInstanceOf(InsufficientBalanceError)
expect(ledgerMock.rollbackFreeze).toHaveBeenCalledWith('freeze_voice_low_balance')
})
// Duplicate billingKey with an already-confirmed freeze: reject before
// executing or freezing anything.
it('rejects duplicate sync billing key when freeze is already confirmed', async () => {
modeMock.getBillingMode.mockResolvedValue('ENFORCE')
ledgerMock.getFreezeByIdempotencyKey.mockResolvedValue({
id: 'freeze_confirmed',
userId: 'u1',
amount: 0.5,
status: 'confirmed',
})
const execute = vi.fn(async () => ({ ok: true }))
await expect(
withTextBilling(
'u1',
'anthropic/claude-sonnet-4',
1000,
1000,
{ projectId: 'p1', action: 'a1', billingKey: 'billing-key-1' },
execute,
),
).rejects.toThrow('duplicate billing request already confirmed')
expect(execute).not.toHaveBeenCalled()
expect(ledgerMock.freezeBalance).not.toHaveBeenCalled()
})
it('rejects duplicate sync billing key when freeze is pending', async () => {
modeMock.getBillingMode.mockResolvedValue('ENFORCE')
ledgerMock.getFreezeByIdempotencyKey.mockResolvedValue({
id: 'freeze_pending',
userId: 'u1',
amount: 0.5,
status: 'pending',
})
const execute = vi.fn(async () => ({ ok: true }))
await expect(
withTextBilling(
'u1',
'anthropic/claude-sonnet-4',
1000,
1000,
{ projectId: 'p1', action: 'a1', billingKey: 'billing-key-2' },
execute,
),
).rejects.toThrow('duplicate billing request is already in progress')
expect(execute).not.toHaveBeenCalled()
expect(ledgerMock.freezeBalance).not.toHaveBeenCalled()
})
it('maps insufficient balance error to 402 response payload', async () => {
const response = handleBillingError(new InsufficientBalanceError(1.2, 0.3))
expect(response).toBeTruthy()
expect(response?.status).toBe(402)
const body = await response?.json()
expect(body?.code).toBe('INSUFFICIENT_BALANCE')
expect(body?.required).toBeCloseTo(1.2, 8)
expect(body?.available).toBeCloseTo(0.3, 8)
})
it('returns null for non-billing errors', () => {
expect(handleBillingError(new Error('x'))).toBeNull()
expect(handleBillingError('x')).toBeNull()
})
describe('task billing lifecycle helpers', () => {
// Baseline billable voice-task info; per-test overrides via `overrides`.
function buildTaskInfo(overrides: Partial<Extract<TaskBillingInfo, { billable: true }>> = {}): Extract<TaskBillingInfo, { billable: true }> {
return {
billable: true,
source: 'task',
taskType: 'voice_line',
apiType: 'voice',
model: 'index-tts2',
quantity: 5,
unit: 'second',
maxFrozenCost: calcVoice(5),
action: 'voice_line_generate',
metadata: { foo: 'bar' },
...overrides,
}
}
// Seedance 2.0 video-task info; quote derived from calcVideo with the same
// resolution/duration parameters that are stored in metadata.
function buildSeedance2VideoTaskInfo(
overrides: Partial<Extract<TaskBillingInfo, { billable: true }>> = {},
): Extract<TaskBillingInfo, { billable: true }> {
return {
billable: true,
source: 'task',
taskType: 'video_panel',
apiType: 'video',
model: 'doubao-seedance-2-0-260128',
quantity: 1,
unit: 'video',
maxFrozenCost: calcVideo('doubao-seedance-2-0-260128', '720p', 1, {
resolution: '720p',
duration: 5,
aspectRatio: '16:9',
containsVideoInput: false,
}),
action: 'video_panel_generate',
metadata: {
resolution: '720p',
duration: 5,
aspectRatio: '16:9',
containsVideoInput: false,
},
...overrides,
}
}
// OFF → 'skipped', SHADOW → 'quoted', ENFORCE → 'frozen' with a freezeId.
it('prepareTaskBilling handles OFF/SHADOW/ENFORCE paths', async () => {
modeMock.getBillingMode.mockResolvedValueOnce('OFF')
const off = await prepareTaskBilling({
id: 'task_off',
userId: 'u1',
projectId: 'p1',
billingInfo: buildTaskInfo(),
})
expect((off as Extract<TaskBillingInfo, { billable: true }>).status).toBe('skipped')
modeMock.getBillingMode.mockResolvedValueOnce('SHADOW')
const shadow = await prepareTaskBilling({
id: 'task_shadow',
userId: 'u1',
projectId: 'p1',
billingInfo: buildTaskInfo(),
})
expect((shadow as Extract<TaskBillingInfo, { billable: true }>).status).toBe('quoted')
modeMock.getBillingMode.mockResolvedValueOnce('ENFORCE')
ledgerMock.freezeBalance.mockResolvedValueOnce('freeze_task_1')
const enforce = await prepareTaskBilling({
id: 'task_enforce',
userId: 'u1',
projectId: 'p1',
billingInfo: buildTaskInfo(),
})
const enforceInfo = enforce as Extract<TaskBillingInfo, { billable: true }>
expect(enforceInfo.status).toBe('frozen')
expect(enforceInfo.freezeId).toBe('freeze_task_1')
})
// Unknown model pricing must not break SHADOW preparation.
it('prepareTaskBilling tolerates unknown text model pricing in SHADOW mode', async () => {
modeMock.getBillingMode.mockResolvedValueOnce('SHADOW')
const unknownTextInfo = buildTaskInfo({
taskType: 'story_to_script_run',
apiType: 'text',
model: 'gpt-5.2',
quantity: 2400,
unit: 'token',
maxFrozenCost: 0,
action: 'story_to_script_run',
})
const shadow = await prepareTaskBilling({
id: 'task_shadow_unknown_text_model',
userId: 'u1',
projectId: 'p1',
billingInfo: unknownTextInfo,
})
const shadowInfo = shadow as Extract<TaskBillingInfo, { billable: true }>
expect(shadowInfo.status).toBe('skipped')
expect(shadowInfo.maxFrozenCost).toBe(0)
})
it('prepareTaskBilling throws InsufficientBalanceError when ENFORCE freeze fails', async () => {
modeMock.getBillingMode.mockResolvedValue('ENFORCE')
ledgerMock.freezeBalance.mockResolvedValue(null)
ledgerMock.getBalance.mockResolvedValue({ balance: 0.001 })
await expect(
prepareTaskBilling({
id: 'task_no_balance',
userId: 'u1',
projectId: 'p1',
billingInfo: buildTaskInfo(),
}),
).rejects.toBeInstanceOf(InsufficientBalanceError)
})
// Non-ENFORCE snapshots settle with a zero charge; SHADOW also records usage.
it('settleTaskBilling handles SHADOW and non-ENFORCE snapshots', async () => {
const shadowSettled = await settleTaskBilling({
id: 'task_shadow_settle',
userId: 'u1',
projectId: 'p1',
billingInfo: buildTaskInfo({ modeSnapshot: 'SHADOW', status: 'quoted' }),
})
const shadowInfo = shadowSettled as Extract<TaskBillingInfo, { billable: true }>
expect(shadowInfo.status).toBe('settled')
expect(shadowInfo.chargedCost).toBe(0)
expect(ledgerMock.recordShadowUsage).toHaveBeenCalled()
const offSettled = await settleTaskBilling({
id: 'task_off_settle',
userId: 'u1',
projectId: 'p1',
billingInfo: buildTaskInfo({ modeSnapshot: 'OFF', status: 'quoted' }),
})
const offInfo = offSettled as Extract<TaskBillingInfo, { billable: true }>
expect(offInfo.status).toBe('settled')
expect(offInfo.chargedCost).toBe(0)
})
it('settleTaskBilling does not fail OFF snapshot when text usage model pricing is unknown', async () => {
const settled = await settleTaskBilling({
id: 'task_off_unknown_usage_model',
userId: 'u1',
projectId: 'p1',
billingInfo: buildTaskInfo({
taskType: 'story_to_script_run',
apiType: 'text',
model: 'gpt-5.2',
quantity: 2400,
unit: 'token',
maxFrozenCost: 0,
action: 'story_to_script_run',
modeSnapshot: 'OFF',
status: 'quoted',
}),
}, {
textUsage: [{ model: 'gpt-5.2', inputTokens: 1200, outputTokens: 800 }],
})
const settledInfo = settled as Extract<TaskBillingInfo, { billable: true }>
expect(settledInfo.status).toBe('settled')
expect(settledInfo.chargedCost).toBe(0)
expect(ledgerMock.recordShadowUsage).not.toHaveBeenCalled()
})
it('settleTaskBilling skips SHADOW settlement when text model pricing is unknown', async () => {
const settled = await settleTaskBilling({
id: 'task_shadow_unknown_usage_model',
userId: 'u1',
projectId: 'p1',
billingInfo: buildTaskInfo({
taskType: 'story_to_script_run',
apiType: 'text',
model: 'gpt-5.2',
quantity: 2400,
unit: 'token',
maxFrozenCost: 0,
action: 'story_to_script_run',
modeSnapshot: 'SHADOW',
status: 'quoted',
}),
}, {
textUsage: [{ model: 'gpt-5.2', inputTokens: 1200, outputTokens: 800 }],
})
const settledInfo = settled as Extract<TaskBillingInfo, { billable: true }>
expect(settledInfo.status).toBe('settled')
expect(settledInfo.chargedCost).toBe(0)
expect(ledgerMock.recordShadowUsage).not.toHaveBeenCalled()
})
// ENFORCE settlement: success, missing freezeId → 'failed', confirm throw → rethrow.
it('settleTaskBilling handles ENFORCE success/failure branches', async () => {
ledgerMock.confirmChargeWithRecord.mockResolvedValueOnce(true)
const settled = await settleTaskBilling({
id: 'task_enforce_settle',
userId: 'u1',
projectId: 'p1',
billingInfo: buildTaskInfo({ modeSnapshot: 'ENFORCE', freezeId: 'freeze_ok' }),
})
expect((settled as Extract<TaskBillingInfo, { billable: true }>).status).toBe('settled')
const missingFreeze = await settleTaskBilling({
id: 'task_enforce_no_freeze',
userId: 'u1',
projectId: 'p1',
billingInfo: buildTaskInfo({ modeSnapshot: 'ENFORCE', freezeId: null }),
})
expect((missingFreeze as Extract<TaskBillingInfo, { billable: true }>).status).toBe('failed')
ledgerMock.confirmChargeWithRecord.mockRejectedValueOnce(new Error('confirm failed'))
await expect(
settleTaskBilling({
id: 'task_enforce_confirm_fail',
userId: 'u1',
projectId: 'p1',
billingInfo: buildTaskInfo({ modeSnapshot: 'ENFORCE', freezeId: 'freeze_fail' }),
}),
).rejects.toThrow('confirm failed')
})
it('settleTaskBilling throws BILLING_CONFIRM_FAILED when confirm and rollback both fail', async () => {
ledgerMock.confirmChargeWithRecord.mockRejectedValueOnce(new Error('confirm failed'))
ledgerMock.rollbackFreeze.mockRejectedValueOnce(new Error('rollback failed'))
await expect(
settleTaskBilling({
id: 'task_confirm_and_rollback_fail',
userId: 'u1',
projectId: 'p1',
billingInfo: buildTaskInfo({ modeSnapshot: 'ENFORCE', freezeId: 'freeze_rb_fail_confirm' }),
}),
).rejects.toMatchObject({
name: 'BillingOperationError',
code: 'BILLING_CONFIRM_FAILED',
})
})
// A BillingOperationError from confirm must be rethrown enriched with
// taskId/freezeId context after the freeze rollback succeeds.
it('settleTaskBilling rethrows BillingOperationError with task context when rollback succeeds', async () => {
ledgerMock.confirmChargeWithRecord.mockRejectedValueOnce(
new BillingOperationError(
'BILLING_INVALID_FREEZE',
'invalid freeze',
{ reason: 'status_mismatch' },
),
)
let thrown: unknown = null
try {
await settleTaskBilling({
id: 'task_confirm_billing_error',
userId: 'u1',
projectId: 'p1',
billingInfo: buildTaskInfo({ modeSnapshot: 'ENFORCE', freezeId: 'freeze_billing_error' }),
})
} catch (error) {
thrown = error
}
expect(thrown).toBeInstanceOf(BillingOperationError)
const billingError = thrown as BillingOperationError
expect(billingError.code).toBe('BILLING_INVALID_FREEZE')
expect(billingError.details).toMatchObject({
reason: 'status_mismatch',
taskId: 'task_confirm_billing_error',
freezeId: 'freeze_billing_error',
})
})
it('settleTaskBilling expands freeze when actual exceeds quoted', async () => {
ledgerMock.confirmChargeWithRecord.mockResolvedValueOnce(true)
const settled = await settleTaskBilling({
id: 'task_enforce_overage',
userId: 'u1',
projectId: 'p1',
billingInfo: buildTaskInfo({ modeSnapshot: 'ENFORCE', freezeId: 'freeze_overage', quantity: 5 }),
}, {
result: { actualDurationSeconds: 50 },
})
expect(ledgerMock.increasePendingFreezeAmount).toHaveBeenCalledTimes(1)
expect(ledgerMock.confirmChargeWithRecord).toHaveBeenCalled()
expect((settled as Extract<TaskBillingInfo, { billable: true }>).chargedCost).toBeCloseTo(calcVoice(50), 8)
})
it('settleTaskBilling charges Seedance 2.0 videos from exact usage tokens', async () => {
ledgerMock.confirmChargeWithRecord.mockResolvedValueOnce(true)
const settled = await settleTaskBilling({
id: 'task_seedance2_actual_tokens',
userId: 'u1',
projectId: 'p1',
billingInfo: buildSeedance2VideoTaskInfo({
modeSnapshot: 'ENFORCE',
freezeId: 'freeze_seedance2_actual_tokens',
}),
}, {
result: { actualVideoTokens: 120_000 },
})
expect(ledgerMock.increasePendingFreezeAmount).toHaveBeenCalledTimes(1)
expect((settled as Extract<TaskBillingInfo, { billable: true }>).chargedCost).toBeCloseTo(5.52, 8)
})
// Zero-token usage entries must not reduce the charge below the quote,
// but the reported model still follows the usage entry.
it('settleTaskBilling keeps quoted charge when text usage has no token counts', async () => {
const quoted = calcText('anthropic/claude-sonnet-4', 500, 500)
const textBillingInfo: Extract<TaskBillingInfo, { billable: true }> = {
billable: true,
source: 'task',
taskType: 'analyze_novel',
apiType: 'text',
model: 'anthropic/claude-sonnet-4',
quantity: 1000,
unit: 'token',
maxFrozenCost: quoted,
action: 'analyze_novel',
modeSnapshot: 'ENFORCE',
status: 'frozen',
freezeId: 'freeze_text_zero',
}
ledgerMock.confirmChargeWithRecord.mockResolvedValueOnce(true)
const settled = await settleTaskBilling({
id: 'task_text_zero_usage',
userId: 'u1',
projectId: 'p1',
billingInfo: textBillingInfo,
}, {
textUsage: [{ model: 'openai/gpt-5', inputTokens: 0, outputTokens: 0 }],
})
expect((settled as Extract<TaskBillingInfo, { billable: true }>).chargedCost).toBeCloseTo(quoted, 8)
const recordParams = ledgerMock.confirmChargeWithRecord.mock.calls.at(-1)?.[1] as { model: string }
expect(recordParams.model).toBe('openai/gpt-5')
})
// Rollback success → 'rolled_back'; rollback throw is swallowed → 'failed'.
it('rollbackTaskBilling handles success and fallback branches', async () => {
const rolledBack = await rollbackTaskBilling({
id: 'task_rb_ok',
billingInfo: buildTaskInfo({ modeSnapshot: 'ENFORCE', freezeId: 'freeze_rb_ok' }),
})
expect((rolledBack as Extract<TaskBillingInfo, { billable: true }>).status).toBe('rolled_back')
ledgerMock.rollbackFreeze.mockRejectedValueOnce(new Error('rollback failed'))
const rollbackFailed = await rollbackTaskBilling({
id: 'task_rb_fail',
billingInfo: buildTaskInfo({ modeSnapshot: 'ENFORCE', freezeId: 'freeze_rb_fail' }),
})
expect((rollbackFailed as Extract<TaskBillingInfo, { billable: true }>).status).toBe('failed')
})
})
})
+82
View File
@@ -0,0 +1,82 @@
import { describe, expect, it } from 'vitest'
import { TASK_TYPE } from '@/lib/task/types'
import { buildDefaultTaskBillingInfo, isBillableTaskType } from '@/lib/billing/task-policy'
import type { TaskBillingInfo } from '@/lib/task/types'
/**
 * Asserts that `info` exists and is the billable variant, then returns it
 * narrowed to `Extract<TaskBillingInfo, { billable: true }>` so callers can
 * read billable-only fields without further casts.
 */
function expectBillableInfo(info: TaskBillingInfo | null): Extract<TaskBillingInfo, { billable: true }> {
  expect(info).toBeTruthy()
  expect(info?.billable).toBe(true)
  if (info && info.billable) {
    return info
  }
  // Unreachable when the expectations above pass; keeps the narrowing honest.
  throw new Error('Expected billable task billing info')
}
// Covers buildDefaultTaskBillingInfo / isBillableTaskType across task types.
describe('billing/task-policy', () => {
// Model payload providing every model slot a billable task type may need.
const billingPayload = {
analysisModel: 'anthropic/claude-sonnet-4',
imageModel: 'seedream',
videoModel: 'doubao-seedance-1-5-pro-251215',
} as const
// Smoke test: every billable type yields a well-formed, non-negative quote.
it('builds TaskBillingInfo for every billable task type', () => {
for (const taskType of Object.values(TASK_TYPE)) {
if (!isBillableTaskType(taskType)) continue
const info = expectBillableInfo(buildDefaultTaskBillingInfo(taskType, billingPayload))
expect(info.taskType).toBe(taskType)
expect(info.maxFrozenCost).toBeGreaterThanOrEqual(0)
}
})
it('returns null for a non-billable task type', () => {
const fake = 'not_billable' as unknown as (typeof TASK_TYPE)[keyof typeof TASK_TYPE]
expect(isBillableTaskType(fake)).toBe(false)
expect(buildDefaultTaskBillingInfo(fake, {})).toBeNull()
})
it('builds text billing info from explicit model payload', () => {
const info = expectBillableInfo(buildDefaultTaskBillingInfo(TASK_TYPE.ANALYZE_NOVEL, {
analysisModel: 'anthropic/claude-sonnet-4',
}))
expect(info.apiType).toBe('text')
expect(info.model).toBe('anthropic/claude-sonnet-4')
expect(info.quantity).toBe(4200)
})
// Missing model names make the builder bail rather than guess a default.
it('returns null for missing required models in text/image/video tasks', () => {
expect(buildDefaultTaskBillingInfo(TASK_TYPE.ANALYZE_NOVEL, {})).toBeNull()
expect(buildDefaultTaskBillingInfo(TASK_TYPE.IMAGE_PANEL, {})).toBeNull()
expect(buildDefaultTaskBillingInfo(TASK_TYPE.VIDEO_PANEL, {})).toBeNull()
})
it('honors candidateCount/count for image tasks', () => {
const info = expectBillableInfo(buildDefaultTaskBillingInfo(TASK_TYPE.IMAGE_PANEL, {
candidateCount: 4,
imageModel: 'seedream4',
}))
expect(info.apiType).toBe('image')
expect(info.quantity).toBe(4)
expect(info.model).toBe('seedream4')
})
// Video model may also come from the nested firstLastFrame.flModel field.
it('builds video billing info from firstLastFrame.flModel', () => {
const info = expectBillableInfo(buildDefaultTaskBillingInfo(TASK_TYPE.VIDEO_PANEL, {
firstLastFrame: {
flModel: 'doubao-seedance-1-0-pro-250528',
},
duration: 8,
}))
expect(info.apiType).toBe('video')
expect(info.model).toBe('doubao-seedance-1-0-pro-250528')
expect(info.quantity).toBe(1)
})
it('uses explicit lip sync model from payload', () => {
const info = expectBillableInfo(buildDefaultTaskBillingInfo(TASK_TYPE.LIP_SYNC, {
lipSyncModel: 'vidu::vidu-lipsync',
}))
expect(info.apiType).toBe('lip-sync')
expect(info.model).toBe('vidu::vidu-lipsync')
expect(info.quantity).toBe(1)
})
})
@@ -0,0 +1,34 @@
import { describe, expect, it, vi } from 'vitest'
import { copyPreviewJsonText } from '@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/storyboard/AIDataModalPreviewPane'

describe('AIDataModalPreviewPane copy helper', () => {
  it('falls back to execCommand when clipboard api rejects', async () => {
    // Clipboard API that always rejects, forcing the legacy textarea path.
    const writeText = vi.fn(async () => {
      throw new Error('clipboard denied')
    })
    const appendChild = vi.fn()
    const removeChild = vi.fn()
    const select = vi.fn()
    // Minimal stand-in for the <textarea> the fallback creates and selects.
    const textarea = {
      value: '',
      style: {} as Record<string, string>,
      select,
    }
    vi.stubGlobal('navigator', { clipboard: { writeText } })
    vi.stubGlobal('document', {
      body: {
        appendChild,
        removeChild,
      },
      createElement: vi.fn(() => textarea),
      execCommand: vi.fn(() => true),
    })
    try {
      await expect(copyPreviewJsonText('{"a":1}')).resolves.toBeUndefined()
      expect(writeText).toHaveBeenCalledWith('{"a":1}')
      expect(appendChild).toHaveBeenCalledWith(textarea)
      expect(select).toHaveBeenCalled()
    } finally {
      // The original never removed these stubs; restore `navigator` and
      // `document` so they cannot leak into other tests in this process.
      vi.unstubAllGlobals()
    }
  })
})
@@ -0,0 +1,48 @@
import * as React from 'react'
import { createElement } from 'react'
import { describe, expect, it, vi } from 'vitest'
import { renderToStaticMarkup } from 'react-dom/server'
import AIDataModal from '@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/storyboard/AIDataModal'
// Identity translator: components render raw message keys, which the
// assertions below can match without loading real locale bundles.
vi.mock('next-intl', () => ({
useTranslations: () => (key: string) => key,
}))
// Render portal contents inline so renderToStaticMarkup can see them.
vi.mock('react-dom', () => ({
createPortal: (node: unknown) => node,
}))
describe('AIDataModal', () => {
  it('在查看数据预览中展示角色完整数据与 slot', () => {
    // The static server render needs a global React (classic JSX runtime),
    // and the mocked createPortal only needs document.body to exist.
    Reflect.set(globalThis, 'React', React)
    vi.stubGlobal('document', { body: {} })
    try {
      const html = renderToStaticMarkup(
        createElement(AIDataModal, {
          isOpen: true,
          onClose: () => undefined,
          panelNumber: 1,
          shotType: 'medium shot',
          cameraMove: 'static',
          description: '皇帝立于大殿中央',
          location: '皇宫大殿',
          characters: [
            {
              name: '皇帝',
              appearance: '朝服形象',
              slot: '皇宫正中龙椅前方台阶下的位置',
            },
          ],
          videoPrompt: 'dramatic court scene',
          photographyRules: null,
          actingNotes: null,
          videoRatio: '16:9',
          onSave: () => undefined,
        }),
      )
      // The preview serializes character data as escaped JSON in the markup.
      expect(html).toContain('&quot;characters&quot;')
      expect(html).toContain('&quot;appearance&quot;: &quot;朝服形象&quot;')
      expect(html).toContain('&quot;slot&quot;: &quot;皇宫正中龙椅前方台阶下的位置&quot;')
    } finally {
      // The original left both globals patched; clean up so other tests in
      // this process start from an unmodified globalThis.
      vi.unstubAllGlobals()
      Reflect.deleteProperty(globalThis, 'React')
    }
  })
})
@@ -0,0 +1,155 @@
import * as React from 'react'
import { createElement } from 'react'
import { renderToStaticMarkup } from 'react-dom/server'
import { describe, expect, it, vi } from 'vitest'
import { NextIntlClientProvider } from 'next-intl'
import type { AbstractIntlMessages } from 'next-intl'
// Replace the icon component with a bare span so markup assertions stay stable.
vi.mock('@/components/ui/icons', () => ({
AppIcon: (props: { className?: string }) => createElement('span', { className: props.className }),
}))
vi.mock('@/components/task/TaskStatusInline', () => ({
default: () => createElement('span', null, 'loading'),
}))
vi.mock('@/lib/task/presentation', () => ({
resolveTaskPresentationState: () => null,
}))
// Stub every query hook the edit modals touch with inert mutations so the
// components can render without a QueryClient or network.
vi.mock('@/lib/query/hooks', () => ({
useUpdateCharacterName: () => ({ isPending: false, mutateAsync: vi.fn() }),
useUpdateProjectCharacterName: () => ({ isPending: false, mutateAsync: vi.fn() }),
useUpdateCharacterAppearanceDescription: () => ({ mutateAsync: vi.fn() }),
useUpdateProjectAppearanceDescription: () => ({ mutateAsync: vi.fn() }),
useUpdateProjectCharacterIntroduction: () => ({ mutateAsync: vi.fn() }),
useAiModifyCharacterDescription: () => ({ mutateAsync: vi.fn() }),
useAiModifyProjectAppearanceDescription: () => ({ mutateAsync: vi.fn() }),
useUpdateLocationName: () => ({ isPending: false, mutateAsync: vi.fn() }),
useUpdateProjectLocationName: () => ({ isPending: false, mutateAsync: vi.fn() }),
useUpdateLocationSummary: () => ({ mutateAsync: vi.fn() }),
useUpdateProjectLocationDescription: () => ({ mutateAsync: vi.fn() }),
useAiModifyLocationDescription: () => ({ mutateAsync: vi.fn() }),
useAiModifyProjectLocationDescription: () => ({ mutateAsync: vi.fn() }),
useAiModifyPropDescription: () => ({ mutateAsync: vi.fn() }),
useAiModifyProjectPropDescription: () => ({ mutateAsync: vi.fn() }),
useAssetActions: () => ({
update: vi.fn(),
updateVariant: vi.fn(),
generate: vi.fn(),
}),
}))
// Minimal zh message tree covering only the keys the edit modals read; the
// literal strings double as the values the assertions below search for.
const messages = {
assets: {
common: {
cancel: '取消',
},
character: {
name: '角色名',
appearance: '形象',
},
location: {
name: '场景名',
description: '场景描述',
},
prop: {
name: '道具名',
summary: '简要说明',
summaryPlaceholder: '一句话说明这是什么道具,不写剧情用途',
description: '图片描述',
descriptionPlaceholder: '只写道具本体的材质、颜色、结构和装饰细节',
},
modal: {
editCharacter: '编辑角色',
editLocation: '编辑场景',
editProp: '编辑道具',
namePlaceholder: '输入名称',
appearancePrompt: '形象描述提示词',
descPlaceholder: '输入描述',
modifyDescription: 'AI修改描述',
modifyPlaceholder: '改成夜晚',
modifyPlaceholderCharacter: '改成黑色西装',
modifyPlaceholderProp: '改成磨砂银质',
saveName: '保存名字',
saveOnly: '仅保存',
saveAndGenerate: '保存并生成',
introduction: '角色介绍',
introductionPlaceholder: '输入角色介绍',
introductionTip: '介绍角色在故事中的身份',
},
smartImport: {
preview: {
saving: '保存中',
},
},
errors: {
saveFailed: '保存失败',
failed: '失败',
},
},
} as const
// Re-typed provider alias: loosens NextIntlClientProvider's prop typing so the
// test fixture's `as const` message tree is accepted without per-call casts.
const TestIntlProvider = NextIntlClientProvider as React.ComponentType<{
locale: string
messages: AbstractIntlMessages
timeZone: string
children?: React.ReactNode
}>
/** Renders `node` to static HTML inside the shared zh intl provider fixture. */
function renderWithMessages(node: React.ReactElement) {
  const providerProps = {
    locale: 'zh',
    messages: messages as unknown as AbstractIntlMessages,
    timeZone: 'Asia/Shanghai',
  }
  const tree = createElement(TestIntlProvider, providerProps, node)
  return renderToStaticMarkup(tree)
}
// Asserts the AI-modify action moved into the description composer: the
// trigger label is present while the old standalone card's placeholder and
// "智能修改" heading are absent from the rendered markup.
describe('asset edit modal AI layout', () => {
it('renders character AI modify action inside the description composer instead of a standalone smart-modify card', async () => {
// Global React is required for the classic JSX runtime during static render.
Reflect.set(globalThis, 'React', React)
// Imported dynamically so the vi.mock factories above are applied first.
const { CharacterEditModal } = await import('@/components/shared/assets/CharacterEditModal')
const html = renderWithMessages(
createElement(CharacterEditModal, {
mode: 'project',
characterId: 'character-1',
characterName: '沈烬',
description: '冷峻禁欲的男性角色形象描述',
appearanceId: 'appearance-1',
onClose: () => undefined,
onSave: () => undefined,
}),
)
expect(html).toContain('AI修改描述')
expect(html).not.toContain('改成黑色西装')
expect(html).not.toContain('智能修改')
})
it('renders prop AI modify action with the prop-specific placeholder', async () => {
Reflect.set(globalThis, 'React', React)
const { PropEditModal } = await import('@/components/shared/assets/PropEditModal')
const html = renderWithMessages(
createElement(PropEditModal, {
mode: 'project',
propId: 'prop-1',
propName: '遗物匕首',
summary: '旧时代留下的金属短刃',
description: '青铜短刃,刃面斑驳,手柄有细密雕纹',
variantId: 'prop-variant-1',
projectId: 'project-1',
onClose: () => undefined,
}),
)
expect(html).toContain('AI修改描述')
expect(html).not.toContain('改成磨砂银质')
expect(html).not.toContain('智能修改')
})
})
+158
View File
@@ -0,0 +1,158 @@
import * as React from 'react'
import { createElement } from 'react'
import type { ComponentProps, ReactElement } from 'react'
import { describe, expect, it, vi } from 'vitest'
import { renderToStaticMarkup } from 'react-dom/server'
import { NextIntlClientProvider } from 'next-intl'
import type { AbstractIntlMessages } from 'next-intl'
import { AssetGrid } from '@/app/[locale]/workspace/asset-hub/components/AssetGrid'
// Intercept React.useState: any state initialized to 'all' (the grid's
// category filter — presumably its default; confirm in AssetGrid) is forced
// to start as 'location', letting a static render exercise the filtered view
// without simulating clicks. All other useState calls pass through.
vi.mock('react', async (importOriginal) => {
const actual = await importOriginal<typeof import('react')>()
return {
...actual,
useState: <T,>(initialState: T | (() => T)) => {
const resolvedInitialState = typeof initialState === 'function'
? (initialState as () => T)()
: initialState
if (resolvedInitialState === 'all') {
return actual.useState('location' as T)
}
return actual.useState(resolvedInitialState)
},
}
})
// The card components are irrelevant to the layout assertions; render nothing.
vi.mock('@/app/[locale]/workspace/asset-hub/components/CharacterCard', () => ({
CharacterCard: () => null,
}))
vi.mock('@/app/[locale]/workspace/asset-hub/components/LocationCard', () => ({
LocationCard: () => null,
}))
vi.mock('@/app/[locale]/workspace/asset-hub/components/VoiceCard', () => ({
VoiceCard: () => null,
}))
vi.mock('@/components/task/TaskStatusInline', () => ({
default: () => null,
}))
// zh messages limited to the assetHub keys AssetGrid renders; the literal
// strings are matched verbatim by the assertions below.
const messages = {
assetHub: {
allAssets: '所有资产',
characters: '角色',
locations: '场景',
props: '道具',
voices: '音色',
addAsset: '新建资产',
addCharacter: '新建角色',
addLocation: '新建场景',
addProp: '新建道具',
addVoice: '新建音色',
downloadAll: '打包下载',
downloadAllTitle: '下载全部图片资产',
downloading: '打包中...',
emptyState: '暂无资产',
emptyStateHint: '点击上方按钮添加角色或场景',
filteredEmptyHint: '点击新建资产添加资产',
pagination: {
previous: '上一页',
next: '下一页',
},
},
} as const
/** Static-renders `node` wrapped in the zh NextIntl provider shared by all cases. */
const renderWithIntl = (node: ReactElement) => {
  const intlProps: ComponentProps<typeof NextIntlClientProvider> = {
    locale: 'zh',
    messages: messages as unknown as AbstractIntlMessages,
    timeZone: 'Asia/Shanghai',
    children: node,
  }
  const wrapped = createElement(NextIntlClientProvider, intlProps)
  return renderToStaticMarkup(wrapped)
}
describe('AssetGrid', () => {
// Empty grid: asserts the compact segmented-control CSS classes and the
// centered "新建资产" button appear in the static markup.
it('空状态下使用与资产库一致的 compact 分段控件,并在中间显示新建资产按钮', () => {
// Global React is required for the classic JSX runtime during static render.
Reflect.set(globalThis, 'React', React)
const html = renderWithIntl(
createElement(AssetGrid, {
assets: [],
loading: false,
onAddCharacter: () => undefined,
onAddLocation: () => undefined,
onAddProp: () => undefined,
onAddVoice: () => undefined,
onDownloadAll: () => undefined,
isDownloading: false,
selectedFolderId: null,
}),
)
expect(html).toContain('inline-block max-w-full min-w-max')
expect(html).toContain('inline-grid grid-flow-col auto-cols-[minmax(96px,max-content)]')
expect(html).toContain('justify-center')
expect(html).toContain('>新建资产<')
})
// With the useState mock forcing the 'location' filter, a character-only
// asset list leaves the filtered view empty and shows the add hint.
it('当前筛选分类没有资产时显示添加提示文案', () => {
Reflect.set(globalThis, 'React', React)
const html = renderWithIntl(
createElement(AssetGrid, {
assets: [
{
id: 'character-1',
kind: 'character',
family: 'visual',
scope: 'project',
name: '角色A',
folderId: null,
capabilities: {
canGenerate: true,
canSelectRender: false,
canRevertRender: false,
canModifyRender: false,
canUploadRender: false,
canBindVoice: false,
canCopyFromGlobal: false,
},
taskRefs: [],
taskState: { isRunning: false, lastError: null },
variants: [],
introduction: null,
profileData: null,
profileConfirmed: null,
profileTaskRefs: [],
profileTaskState: { isRunning: false, lastError: null },
voice: {
voiceType: null,
voiceId: null,
customVoiceUrl: null,
media: null,
},
},
],
loading: false,
onAddCharacter: () => undefined,
onAddLocation: () => undefined,
onAddProp: () => undefined,
onAddVoice: () => undefined,
onDownloadAll: () => undefined,
isDownloading: false,
selectedFolderId: null,
}),
)
expect(html).toContain('点击新建资产添加资产')
})
})
@@ -0,0 +1,243 @@
import * as React from 'react'
import { createElement } from 'react'
import { renderToStaticMarkup } from 'react-dom/server'
import { describe, expect, it, vi } from 'vitest'
import { NextIntlClientProvider } from 'next-intl'
import type { AbstractIntlMessages } from 'next-intl'
// Shared "idle" mutation stub returned by every mocked mutation hook below.
// Wrapped in vi.hoisted so the declaration is hoisted together with the
// vi.mock factories that close over it — without this, an eagerly imported
// mocked module would hit the const before initialization (TDZ). This also
// matches the vi.hoisted pattern used by the sibling test files.
const idleMutation = vi.hoisted(() => ({
  isPending: false,
  mutate: vi.fn(),
}))
// Every character/location mutation hook resolves to the shared idle stub —
// these tests only render static markup and never fire a mutation.
vi.mock('@/lib/query/mutations', () => ({
  useGenerateCharacterImage: () => idleMutation,
  useSelectCharacterImage: () => idleMutation,
  useUndoCharacterImage: () => idleMutation,
  useUploadCharacterImage: () => idleMutation,
  useDeleteCharacter: () => idleMutation,
  useDeleteCharacterAppearance: () => idleMutation,
  useUploadCharacterVoice: () => idleMutation,
  useGenerateLocationImage: () => idleMutation,
  useSelectLocationImage: () => idleMutation,
  useUndoLocationImage: () => idleMutation,
  useUploadLocationImage: () => idleMutation,
  useDeleteLocation: () => idleMutation,
}))
// AppIcon renders a <span data-icon="..."> so tests can assert on icon names.
vi.mock('@/components/ui/icons', () => ({
  AppIcon: (props: { className?: string; name?: string }) =>
    createElement('span', { className: props.className, 'data-icon': props.name }),
}))
vi.mock('@/components/task/TaskStatusOverlay', () => ({
  default: () => createElement('div', null, 'overlay'),
}))
vi.mock('@/components/task/TaskStatusInline', () => ({
  default: () => createElement('span', null, 'inline'),
}))
// The image stand-in keeps both class props so aspect-ratio classes stay
// visible in the rendered HTML.
vi.mock('@/components/media/MediaImageWithLoading', () => ({
  MediaImageWithLoading: (props: { containerClassName?: string; className?: string }) =>
    createElement('div', {
      className: [props.containerClassName, props.className].filter(Boolean).join(' '),
    }),
}))
vi.mock('@/components/image-generation/ImageGenerationInlineCountButton', () => ({
  default: () => createElement('button', null, 'count'),
}))
vi.mock('@/lib/task/presentation', () => ({
  resolveTaskPresentationState: () => null,
}))
vi.mock('@/lib/image-generation/use-image-generation-count', () => ({
  useImageGenerationCount: () => ({
    count: 1,
    setCount: vi.fn(),
  }),
}))
vi.mock('@/lib/image-generation/count', () => ({
  getImageGenerationCountOptions: () => [{ value: 1, label: '1' }],
}))
vi.mock('@/app/[locale]/workspace/asset-hub/components/VoiceSettings', () => ({
  default: () => createElement('div', null, 'voice-settings'),
}))
// zh message catalogue covering the assetHub/assets keys used by the cards.
const messages = {
  assetHub: {
    generateFailed: '生成失败',
    selectFailed: '选择失败',
    uploadFailed: '上传失败',
    confirmDeleteLocation: '确认删除场景',
    confirmDeleteProp: '确认删除道具',
    confirmDeleteCharacter: '确认删除角色',
    cancel: '取消',
    delete: '删除',
    propLabel: '道具',
    locationLabel: '场景',
  },
  assets: {
    image: {
      generateCountPrefix: '生成',
      generateCountSuffix: '张',
      generating: '生成中',
      generatingPlaceholder: '正在生成',
      regenerateStuck: '重新生成',
      regenCountPrefix: '重生成',
      undo: '撤回',
      upload: '上传',
      uploadReplace: '替换',
      edit: '编辑',
      selectCount: '选择数量',
      confirmOption: '确认选择',
      optionNumber: '方案 {number}',
    },
    common: {
      generateFailed: '生成失败',
    },
    location: {
      regenerateImage: '重生成场景',
      edit: '编辑场景',
      delete: '删除场景',
    },
    prop: {
      regenerateImage: '重生成道具',
      edit: '编辑道具',
      delete: '删除道具',
    },
    character: {
      deleteWhole: '删除整个角色',
      primary: '主形象',
      secondary: '子形象',
      delete: '删除角色',
      deleteOptions: '删除选项',
    },
    video: {
      panelCard: {
        editPrompt: '编辑',
      },
    },
  },
} as const
// Narrow the provider's prop surface so createElement accepts exactly the
// subset of props these tests supply.
const TestIntlProvider = NextIntlClientProvider as React.ComponentType<{
  locale: string
  messages: AbstractIntlMessages
  timeZone: string
  children?: React.ReactNode
}>
/** Render `node` inside the zh test intl provider to static markup. */
const renderWithIntl = (node: React.ReactElement) => {
  const intlProps = {
    locale: 'zh',
    messages: messages as unknown as AbstractIntlMessages,
    timeZone: 'Asia/Shanghai',
  }
  const tree = createElement(TestIntlProvider, intlProps, node)
  return renderToStaticMarkup(tree)
}
// Cards are imported with dynamic import() inside each test so the vi.mock
// declarations above are registered before the modules are evaluated.
describe('asset hub card aspect ratio', () => {
  it('keeps prop cards at the same 3:2 ratio as character assets while generation is running', async () => {
    Reflect.set(globalThis, 'React', React)
    const { default: LocationCard } = await import('@/app/[locale]/workspace/asset-hub/components/LocationCard')
    const html = renderWithIntl(
      createElement(LocationCard, {
        location: {
          id: 'prop-1',
          name: '鼠标',
          summary: '电脑鼠标',
          folderId: null,
          // No imageUrl + imageTaskRunning: the in-progress placeholder path.
          images: [
            {
              id: 'prop-image-1',
              imageIndex: 0,
              description: null,
              imageUrl: null,
              previousImageUrl: null,
              isSelected: false,
              imageTaskRunning: true,
            },
          ],
        },
        assetType: 'prop',
      }),
    )
    expect(html).toContain('aspect-[3/2]')
    expect(html).toContain('data-icon="image"')
    // The old fixed-min-height placeholder must be gone.
    expect(html).not.toContain('min-h-[100px]')
  })
  it('keeps location cards square while generation is running', async () => {
    Reflect.set(globalThis, 'React', React)
    const { default: LocationCard } = await import('@/app/[locale]/workspace/asset-hub/components/LocationCard')
    const html = renderWithIntl(
      createElement(LocationCard, {
        location: {
          id: 'location-1',
          name: '餐厅',
          summary: '极简餐厅',
          folderId: null,
          images: [
            {
              id: 'location-image-1',
              imageIndex: 0,
              description: null,
              imageUrl: null,
              previousImageUrl: null,
              isSelected: false,
              imageTaskRunning: true,
            },
          ],
        },
        assetType: 'location',
      }),
    )
    // Locations use a square slot, unlike the 3:2 prop/character slots.
    expect(html).toContain('aspect-square')
    expect(html).toContain('data-icon="image"')
    expect(html).not.toContain('min-h-[100px]')
  })
  it('keeps character cards at the fixed 3:2 ratio while generation is running', async () => {
    Reflect.set(globalThis, 'React', React)
    const { CharacterCard } = await import('@/app/[locale]/workspace/asset-hub/components/CharacterCard')
    const html = renderWithIntl(
      createElement(CharacterCard, {
        character: {
          id: 'character-1',
          name: '沈烬',
          folderId: null,
          customVoiceUrl: null,
          appearances: [
            {
              id: 'appearance-1',
              appearanceIndex: 0,
              changeReason: '默认形象',
              description: null,
              imageUrl: null,
              imageUrls: [],
              selectedIndex: null,
              previousImageUrl: null,
              previousImageUrls: [],
              imageTaskRunning: true,
            },
          ],
        },
      }),
    )
    expect(html).toContain('aspect-[3/2]')
    expect(html).not.toContain('min-h-[100px]')
  })
})
@@ -0,0 +1,80 @@
import * as React from 'react'
import { createElement } from 'react'
import type { ComponentProps, ReactElement } from 'react'
import { describe, expect, it, vi } from 'vitest'
import { renderToStaticMarkup } from 'react-dom/server'
import { NextIntlClientProvider } from 'next-intl'
import type { AbstractIntlMessages } from 'next-intl'
import AssetToolbar from '@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/assets/AssetToolbar'
// Toolbar reads project assets / project data through these hooks; empty
// fixtures keep the counts deterministic.
vi.mock('@/lib/query/hooks', () => ({
  useProjectAssets: vi.fn(() => ({ data: { characters: [], locations: [], props: [] } })),
  useProjectData: vi.fn(() => ({ data: { name: '项目A' } })),
}))
// zh messages for the toolbar, including the removed entries the test
// asserts are absent from the markup.
const messages = {
  assets: {
    common: {
      refresh: '刷新',
    },
    filterBar: {
      allEpisodes: '全部集数',
    },
    toolbar: {
      assetManagement: '资产管理',
      assetCount: '共 {total} 个资产({appearances} 角色形象 + {locations} 场景 + {props} 道具)',
      globalAnalyze: '全局分析',
      globalAnalyzeHint: '分析所有资产',
      downloadAll: '下载全部',
      generateAll: '生成全部图片',
      regenerateAll: '重新生成全部',
      regenerateAllHint: '重新生成所有图片',
    },
    assetLibrary: {
      downloadEmpty: '没有可下载图片',
      downloadFailed: '下载失败',
    },
  },
} as const
/** Wrap `node` in a zh-locale NextIntl provider and render to static HTML. */
function renderWithIntl(node: ReactElement) {
  const providerProps: ComponentProps<typeof NextIntlClientProvider> = {
    locale: 'zh',
    timeZone: 'Asia/Shanghai',
    messages: messages as unknown as AbstractIntlMessages,
    children: node,
  }
  return renderToStaticMarkup(createElement(NextIntlClientProvider, providerProps))
}
describe('AssetToolbar', () => {
  it('删除批量生成与刷新按钮 -> 仅保留全局分析和下载入口', () => {
    Reflect.set(globalThis, 'React', React)
    // Static props: no batch submission, no analysis in flight, no episodes.
    const toolbarProps = {
      projectId: 'project-1',
      totalAssets: 24,
      totalAppearances: 11,
      totalLocations: 13,
      totalProps: 0,
      isBatchSubmitting: false,
      isAnalyzingAssets: false,
      isGlobalAnalyzing: false,
      onGlobalAnalyze: () => undefined,
      episodeId: null,
      onEpisodeChange: () => undefined,
      episodes: [],
    }
    const html = renderWithIntl(createElement(AssetToolbar, toolbarProps))
    // Surviving entries: global analyze + download.
    expect(html).toContain('全局分析')
    expect(html).toContain('title="下载全部"')
    // Removed entries: batch generate, regenerate-all, refresh.
    expect(html).not.toContain('生成全部图片')
    expect(html).not.toContain('重新生成全部')
    expect(html).not.toContain('>刷新<')
  })
})
@@ -0,0 +1,46 @@
import * as React from 'react'
import { createElement } from 'react'
import { renderToStaticMarkup } from 'react-dom/server'
import { describe, expect, it, vi } from 'vitest'
import { CapsuleNav, EpisodeSelector } from '@/components/ui/CapsuleNav'
// next-intl is mocked so translations resolve to their raw key, and AppIcon
// renders a <span data-icon="..."> for icon assertions.
vi.mock('next-intl', () => ({
  useTranslations: () => (key: string) => key,
}))
vi.mock('@/components/ui/icons', () => ({
  AppIcon: ({ name, className }: { name: string; className?: string }) =>
    createElement('span', { 'data-icon': name, className }),
}))
describe('CapsuleNav layering', () => {
  it('keeps fixed workspace navigation below modal overlays', () => {
    Reflect.set(globalThis, 'React', React)
    // Build both fixed bars up front, then render them inside one wrapper.
    const nav = createElement(CapsuleNav, {
      items: [
        { id: 'config', icon: 'sparkles', label: '配置', status: 'active' as const },
      ],
      activeId: 'config',
      onItemClick: () => undefined,
      projectId: 'project-1',
    })
    const selector = createElement(EpisodeSelector, {
      episodes: [
        { id: 'episode-1', title: '剧集 1' },
      ],
      currentId: 'episode-1',
      onSelect: () => undefined,
      projectName: '项目 A',
    })
    const html = renderToStaticMarkup(createElement('div', null, nav, selector))
    // Both bars are pinned at z-40 so modal layers can sit above them.
    expect(html).toContain('fixed top-20 left-1/2 -translate-x-1/2 z-40')
    expect(html).toContain('fixed top-20 left-6 z-40')
    // The old higher-z variants must be gone.
    expect(html).not.toContain('z-50 animate-fadeInDown')
    expect(html).not.toContain('z-[60]')
  })
})
@@ -0,0 +1,70 @@
import * as React from 'react'
import { createElement } from 'react'
import { renderToStaticMarkup } from 'react-dom/server'
import { describe, expect, it, vi } from 'vitest'
import { NextIntlClientProvider } from 'next-intl'
import type { AbstractIntlMessages } from 'next-intl'
// Presentational stand-ins; the image stand-in keeps both class props so
// aspect-ratio classes remain visible in the rendered HTML.
vi.mock('@/components/ui/icons', () => ({
  AppIcon: () => createElement('span', null),
}))
vi.mock('@/components/task/TaskStatusOverlay', () => ({
  default: () => createElement('div', null, 'overlay'),
}))
vi.mock('@/components/media/MediaImageWithLoading', () => ({
  MediaImageWithLoading: (props: { containerClassName?: string; className?: string }) =>
    createElement('div', { className: [props.containerClassName, props.className].filter(Boolean).join(' ') }),
}))
// Only the keys the gallery renders are needed here.
const messages = {
  assets: {
    common: {
      generateFailed: '生成失败',
    },
    image: {
      optionNumber: '方案 {number}',
    },
  },
} as const
// Narrow the provider's prop surface for createElement usage in the tests.
const TestIntlProvider = NextIntlClientProvider as React.ComponentType<{
  locale: string
  messages: AbstractIntlMessages
  timeZone: string
  children?: React.ReactNode
}>
describe('CharacterCardGallery aspect ratio', () => {
  it('renders the single-image slot at a fixed 3:2 ratio', async () => {
    Reflect.set(globalThis, 'React', React)
    // Imported lazily so the vi.mock declarations above take effect first.
    const { default: CharacterCardGallery } = await import('@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/assets/character-card/CharacterCardGallery')
    const gallery = createElement(CharacterCardGallery, {
      mode: 'single',
      characterName: '沈烬',
      changeReason: '默认形象',
      aspectClassName: 'aspect-[3/2]',
      currentImageUrl: null,
      selectedIndex: null,
      hasMultipleImages: false,
      isAppearanceTaskRunning: true,
      displayTaskPresentation: null,
      onImageClick: () => undefined,
      overlayActions: null,
    })
    const intlProps = {
      locale: 'zh',
      messages: messages as unknown as AbstractIntlMessages,
      timeZone: 'Asia/Shanghai',
    }
    const html = renderToStaticMarkup(createElement(TestIntlProvider, intlProps, gallery))
    expect(html).toContain('aspect-[3/2]')
  })
})
@@ -0,0 +1,116 @@
import * as React from 'react'
import { createElement } from 'react'
import type { ComponentProps, ReactElement } from 'react'
import { describe, expect, it, vi } from 'vitest'
import { renderToStaticMarkup } from 'react-dom/server'
import { NextIntlClientProvider } from 'next-intl'
import type { AbstractIntlMessages } from 'next-intl'
import { CharacterCreationModal } from '@/components/shared/assets/CharacterCreationModal'
// The modal's data hook returns no existing characters, and the submit hook
// is replaced by an all-idle stub so the footer renders its default actions.
vi.mock('@/lib/query/hooks', () => ({
  useProjectAssets: vi.fn(() => ({ data: { characters: [] } })),
}))
vi.mock('@/components/shared/assets/character-creation/hooks/useCharacterCreationSubmit', () => ({
  useCharacterCreationSubmit: vi.fn(() => ({
    isSubmitting: false,
    isAiDesigning: false,
    isExtracting: false,
    characterGenerationCount: 3,
    setCharacterGenerationCount: vi.fn(),
    referenceCharacterGenerationCount: 3,
    setReferenceCharacterGenerationCount: vi.fn(),
    handleExtractDescription: vi.fn(),
    handleCreateWithReference: vi.fn(),
    handleAiDesign: vi.fn(),
    handleSubmit: vi.fn(),
    handleSubmitAndGenerate: vi.fn(),
  })),
}))
// zh messages for the assetModal namespace used by the modal markup.
const messages = {
  assetModal: {
    character: {
      title: '新建角色',
      name: '角色名称',
      namePlaceholder: '请输入角色名称',
      modeReference: '参考图模式',
      modeDescription: '描述模式',
      uploadReference: '上传参考图',
      pasteHint: 'Ctrl+V 粘贴',
      generationMode: '生成方式',
      directGenerate: '直接生成',
      extractPrompt: '反推提示词',
      extractFirst: '先提取描述',
      description: '角色描述',
      descPlaceholder: '请输入角色外貌描述...',
      isSubAppearance: '这是一个子形象',
      isSubAppearanceHint: '为已有角色添加新的形象状态',
      selectMainCharacter: '选择主角色',
      selectCharacterPlaceholder: '请选择角色...',
      appearancesCount: '{count} 个形象',
      changeReason: '形象变化原因',
      changeReasonPlaceholder: '例如',
      useReferenceGeneratePrefix: '使用参考图生成',
      generateCountSuffix: '张图片',
      selectReferenceGenerateCount: '选择参考图生成数量',
    },
    artStyle: { title: '画面风格' },
    aiDesign: {
      title: 'AI 设计',
      placeholder: '描述你想要的角色特征...',
      generating: '设计中...',
      generate: '生成',
    },
    common: {
      creating: '创建中...',
      cancel: '取消',
      adding: '添加中...',
      add: '添加',
      addOnly: '仅添加角色',
      addOnlyToAssetHub: '仅添加人物到资产库',
      addAndGeneratePrefix: '添加并生成',
      generateCountSuffix: '张图片',
      selectGenerateCount: '选择生成数量',
      optional: '(可选)',
    },
    errors: {
      uploadFailed: '上传失败',
      extractDescriptionFailed: '提取描述失败',
      createFailed: '创建失败',
      aiDesignFailed: 'AI 设计失败',
      addSubAppearanceFailed: '添加子形象失败',
      insufficientBalance: '账户余额不足',
    },
  },
} as const
/** Static-markup render helper: wraps `node` in the zh NextIntl provider. */
function renderWithIntl(node: ReactElement) {
  const intlProps: ComponentProps<typeof NextIntlClientProvider> = {
    children: node,
    locale: 'zh',
    messages: messages as unknown as AbstractIntlMessages,
    timeZone: 'Asia/Shanghai',
  }
  const tree = createElement(NextIntlClientProvider, intlProps)
  return renderToStaticMarkup(tree)
}
describe('CharacterCreationModal', () => {
  it('renders add-only and add-and-generate actions in the fixed footer', () => {
    Reflect.set(globalThis, 'React', React)
    const modal = createElement(CharacterCreationModal, {
      mode: 'asset-hub',
      onClose: () => undefined,
      onSuccess: () => undefined,
    })
    const html = renderWithIntl(modal)
    // Footer exposes both creation paths plus cancel.
    expect(html).toContain('仅添加人物到资产库')
    expect(html).toContain('添加并生成')
    expect(html).toContain('取消')
  })
})
@@ -0,0 +1,143 @@
import * as React from 'react'
import { createElement } from 'react'
import type { ComponentProps, ReactElement } from 'react'
import { renderToStaticMarkup } from 'react-dom/server'
import { describe, expect, it, vi } from 'vitest'
import { NextIntlClientProvider } from 'next-intl'
import type { AbstractIntlMessages } from 'next-intl'
import CharacterSection from '@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/assets/CharacterSection'
// Hoisted spies so the vi.mock factories below can safely reference them.
const useProjectAssetsMock = vi.hoisted(() => vi.fn())
const characterCardMock = vi.hoisted(() => vi.fn((_props: unknown) => null))
vi.mock('@/lib/query/hooks/useProjectAssets', () => ({
  useProjectAssets: (projectId: string | null) => useProjectAssetsMock(projectId),
}))
vi.mock('@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/assets/CharacterCard', () => ({
  __esModule: true,
  default: (props: unknown) => characterCardMock(props),
}))
vi.mock('@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/assets/CharacterProfileCard', () => ({
  __esModule: true,
  default: () => null,
}))
vi.mock('@/types/character-profile', () => ({
  parseProfileData: () => null,
}))
vi.mock('@/components/task/TaskStatusInline', () => ({
  __esModule: true,
  default: () => null,
}))
vi.mock('@/lib/task/presentation', () => ({
  resolveTaskPresentationState: () => null,
}))
// AppIcon stand-in exposes the icon name for the data-icon assertion below.
vi.mock('@/components/ui/icons', () => ({
  AppIcon: (props: { name?: string; className?: string }) =>
    createElement('span', { 'data-icon': props.name, className: props.className }),
}))
// zh messages for the section header and per-character action labels.
const messages = {
  assets: {
    stage: {
      characterAssets: '角色资产',
      counts: '{characterCount} 个角色,{appearanceCount} 个形象',
      pendingProfilesBanner: '待确认角色',
      pendingProfilesHint: '确认角色设定',
      confirmAll: '全部确认',
    },
    character: {
      add: '新建角色',
      assetCount: '{count} 个形象',
      copyFromGlobal: '从资产中心导入',
      delete: '删除角色',
    },
  },
} as const
/** Render `node` inside a zh-locale NextIntl provider to static markup. */
const renderWithIntl = (node: ReactElement) => {
  const intlProps: ComponentProps<typeof NextIntlClientProvider> = {
    locale: 'zh',
    timeZone: 'Asia/Shanghai',
    messages: messages as unknown as AbstractIntlMessages,
    children: node,
  }
  const tree = createElement(NextIntlClientProvider, intlProps)
  return renderToStaticMarkup(tree)
}
describe('CharacterSection actions', () => {
  it('renders import and delete actions stacked vertically with the import icon', () => {
    Reflect.set(globalThis, 'React', React)
    // One character with a single appearance — enough to render the action
    // column the assertions target.
    useProjectAssetsMock.mockReturnValue({
      data: {
        characters: [
          {
            id: 'character-1',
            name: '西装男',
            introduction: null,
            appearances: [
              {
                id: 'appearance-1',
                appearanceIndex: 0,
                changeReason: '初始形象',
                imageUrl: null,
                imageUrls: [],
                selectedIndex: null,
              },
            ],
          },
        ],
      },
    })
    // All callbacks are inert; the test only inspects static markup.
    const html = renderWithIntl(
      createElement(CharacterSection, {
        projectId: 'project-1',
        activeTaskKeys: new Set<string>(),
        onClearTaskKey: () => undefined,
        onRegisterTransientTaskKey: () => undefined,
        isAnalyzingAssets: false,
        onAddCharacter: () => undefined,
        onDeleteCharacter: () => undefined,
        onDeleteAppearance: () => undefined,
        onEditAppearance: () => undefined,
        handleGenerateImage: async () => undefined,
        onSelectImage: () => undefined,
        onConfirmSelection: () => undefined,
        onRegenerateSingle: async () => undefined,
        onRegenerateGroup: async () => undefined,
        onUndo: () => undefined,
        onImageClick: () => undefined,
        onImageEdit: () => undefined,
        onVoiceChange: () => undefined,
        onVoiceDesign: () => undefined,
        onVoiceSelectFromHub: () => undefined,
        onCopyFromGlobal: () => undefined,
        getAppearances: (character) => character.appearances,
        unconfirmedCharacters: [],
        isConfirmingCharacter: () => false,
        deletingCharacterId: null,
        batchConfirming: false,
        batchConfirmingState: null,
        onBatchConfirm: () => undefined,
        onEditProfile: () => undefined,
        onConfirmProfile: () => undefined,
        onUseExistingProfile: () => undefined,
        onDeleteProfile: () => undefined,
      }),
    )
    // Import + delete labels, the import icon, and the vertical stack class.
    expect(html).toContain('从资产中心导入')
    expect(html).toContain('删除角色')
    expect(html).toContain('data-icon="arrowDownCircle"')
    expect(html).toContain('flex flex-col items-end gap-1.5')
  })
})
@@ -0,0 +1,150 @@
import * as React from 'react'
import { createElement } from 'react'
import { renderToStaticMarkup } from 'react-dom/server'
import { describe, expect, it, vi } from 'vitest'
import { NextIntlClientProvider } from 'next-intl'
import type { AbstractIntlMessages } from 'next-intl'
// react-query's useQuery is routed through a hoisted spy so each test can
// control the returned asset list.
const useQueryMock = vi.hoisted(() => vi.fn())
vi.mock('@tanstack/react-query', () => ({
  useQuery: (options: unknown) => useQueryMock(options),
}))
vi.mock('@/components/ui/ImagePreviewModal', () => ({
  __esModule: true,
  default: () => null,
}))
vi.mock('@/components/task/TaskStatusInline', () => ({
  __esModule: true,
  default: () => null,
}))
vi.mock('@/lib/task/presentation', () => ({
  resolveTaskPresentationState: () => null,
}))
// Image stand-in renders a real <img src> so the preview URL is assertable.
vi.mock('@/components/media/MediaImageWithLoading', () => ({
  MediaImageWithLoading: (props: { src: string; alt: string; className?: string; containerClassName?: string }) =>
    createElement('img', {
      src: props.src,
      alt: props.alt,
      className: [props.className, props.containerClassName].filter(Boolean).join(' '),
    }),
}))
vi.mock('@/components/ui/icons', () => ({
  AppIcon: (props: { name?: string; className?: string }) =>
    createElement('span', { 'data-icon': props.name, className: props.className }),
}))
// zh messages for the picker dialog.
const messages = {
  assetPicker: {
    selectCharacter: '从资产中心选择角色',
    selectLocation: '从资产中心选择场景',
    selectProp: '从资产中心选择道具',
    selectVoice: '从资产中心选择音色',
    searchPlaceholder: '搜索资产名称或文件夹...',
    noAssets: '资产中心暂无资产',
    createInAssetHub: '请先在资产中心创建角色/场景/音色',
    noSearchResults: '未找到匹配的资产',
    appearances: '个形象',
    images: '张图片',
    cancel: '取消',
    confirmCopy: '确认导入',
  },
} as const
// Narrow the provider's prop surface for createElement usage in the tests.
const TestIntlProvider = NextIntlClientProvider as React.ComponentType<{
  locale: string
  messages: AbstractIntlMessages
  timeZone: string
  children?: React.ReactNode
}>
describe('GlobalAssetPicker preview mapping', () => {
  it('renders the real character preview image at 3:2 without the appearance count line', async () => {
    Reflect.set(globalThis, 'React', React)
    useQueryMock.mockReset()
    // Return one global character (with a selected render) only when the
    // query is enabled; disabled queries see an empty list.
    useQueryMock.mockImplementation((options: { enabled?: boolean }) => ({
      data: options.enabled ? [{
        id: 'character-1',
        kind: 'character',
        family: 'visual',
        scope: 'global',
        name: '西装男',
        folderId: null,
        capabilities: {
          canGenerate: true,
          canSelectRender: true,
          canRevertRender: true,
          canModifyRender: true,
          canUploadRender: true,
          canBindVoice: true,
          canCopyFromGlobal: false,
        },
        taskRefs: [],
        taskState: { isRunning: false, lastError: null },
        introduction: null,
        profileData: null,
        profileConfirmed: null,
        profileTaskRefs: [],
        profileTaskState: { isRunning: false, lastError: null },
        voice: {
          voiceType: null,
          voiceId: null,
          customVoiceUrl: null,
          media: null,
        },
        variants: [{
          id: 'variant-1',
          index: 0,
          label: '默认形象',
          description: '黑西装',
          selectionState: { selectedRenderIndex: 0 },
          taskRefs: [],
          taskState: { isRunning: false, lastError: null },
          renders: [{
            id: 'render-1',
            index: 0,
            imageUrl: 'https://example.com/character.png',
            media: null,
            isSelected: true,
            previousImageUrl: null,
            previousMedia: null,
            taskRefs: [],
            taskState: { isRunning: false, lastError: null },
          }],
        }],
      }] : [],
      isFetching: false,
      refetch: vi.fn(),
    }))
    // Imported lazily so the vi.mock declarations above take effect first.
    const { default: GlobalAssetPicker } = await import('@/components/shared/assets/GlobalAssetPicker')
    const html = renderToStaticMarkup(
      createElement(
        TestIntlProvider,
        {
          locale: 'zh',
          messages: messages as unknown as AbstractIntlMessages,
          timeZone: 'Asia/Shanghai',
        },
        createElement(GlobalAssetPicker, {
          isOpen: true,
          onClose: () => undefined,
          onSelect: () => undefined,
          type: 'character',
        }),
      ),
    )
    // Real preview image at 3:2; no placeholder icon, divider, or count line.
    expect(html).toContain('src="https://example.com/character.png"')
    expect(html).toContain('aspect-[3/2]')
    expect(html).toContain('object-contain')
    expect(html).not.toContain('data-icon="userAlt"')
    expect(html).not.toContain('border-b')
    expect(html).not.toContain('个形象')
  })
})
@@ -0,0 +1,77 @@
import * as React from 'react'
import { createElement } from 'react'
import { describe, expect, it } from 'vitest'
import { renderToStaticMarkup } from 'react-dom/server'
import ImageGenerationInlineCountButton from '@/components/image-generation/ImageGenerationInlineCountButton'
describe('ImageGenerationInlineCountButton', () => {
  it('keeps the select enabled when only the action is disabled', () => {
    Reflect.set(globalThis, 'React', React)
    const html = renderToStaticMarkup(
      createElement(ImageGenerationInlineCountButton, {
        prefix: createElement('span', null, '生成'),
        suffix: createElement('span', null, '张图片'),
        value: 3,
        options: [1, 2, 3],
        onValueChange: () => undefined,
        onClick: () => undefined,
        actionDisabled: true,
        selectDisabled: false,
        ariaLabel: '选择生成数量',
      }),
    )
    // Action is aria-disabled, but the <select> itself must stay enabled.
    expect(html).toContain('role="button"')
    expect(html).toContain('aria-disabled="true"')
    expect(html).not.toContain('<select disabled=""')
    expect(html).toContain('rounded-full bg-white/12')
    expect(html).toContain('inline-flex shrink-0 items-center whitespace-nowrap leading-none')
  })
  it('renders the count control as a rounded inline pill with the chevron inside it', () => {
    Reflect.set(globalThis, 'React', React)
    const html = renderToStaticMarkup(
      createElement(ImageGenerationInlineCountButton, {
        prefix: createElement('span', null, '重新生成'),
        suffix: createElement('span', null, '张'),
        value: 2,
        options: [1, 2, 3],
        onValueChange: () => undefined,
        onClick: () => undefined,
        ariaLabel: '选择重新生成数量',
      }),
    )
    expect(html).toContain('重新生成')
    expect(html).toContain('张')
    // Pill styling plus the inset chevron position and hover state.
    expect(html).toContain('whitespace-nowrap')
    expect(html).toContain('rounded-full bg-white/12')
    expect(html).toContain('right-2')
    expect(html).toContain('hover:bg-white/16')
  })
  it('can render a regenerate action without exposing the count selector', () => {
    Reflect.set(globalThis, 'React', React)
    const html = renderToStaticMarkup(
      createElement(ImageGenerationInlineCountButton, {
        prefix: createElement('span', null, '重新生成'),
        suffix: null,
        value: 2,
        options: [1, 2, 3],
        onValueChange: () => undefined,
        onClick: () => undefined,
        showCountControl: false,
        ariaLabel: '重新生成当前图片',
        className: 'inline-flex h-6 items-center justify-center rounded-md px-1.5',
      }),
    )
    // With showCountControl off there is no <select> and no pill styling.
    expect(html).toContain('重新生成')
    expect(html).toContain('type="button"')
    expect(html).not.toContain('<select')
    expect(html).not.toContain('rounded-full bg-white/12')
  })
})
@@ -0,0 +1,98 @@
import * as React from 'react'
import { createElement } from 'react'
import type { ComponentProps, ReactElement } from 'react'
import { describe, expect, it } from 'vitest'
import { renderToStaticMarkup } from 'react-dom/server'
import { NextIntlClientProvider } from 'next-intl'
import type { AbstractIntlMessages } from 'next-intl'
import LLMStageStreamCard from '@/components/llm-console/LLMStageStreamCard'
// zh messages for the progress namespace the stage stream card renders.
const messages = {
  progress: {
    status: {
      completed: '已完成',
      failed: '失败',
      processing: '进行中',
      queued: '排队中',
      pending: '未开始',
    },
    stageCard: {
      stage: '阶段',
      realtimeStream: '实时流',
      currentStage: '当前阶段',
      outputTitle: 'AI 实时输出 · {stage}',
      waitingModelOutput: '等待模型输出...',
      reasoningNotProvided: '该步骤未返回思考过程',
    },
    streamStep: {
      analyzeProps: '道具分析',
    },
    runtime: {
      llm: {
        processing: '模型处理中...',
      },
    },
  },
} as const
/** Render `node` to static HTML inside a zh NextIntl provider. */
function renderWithIntl(node: ReactElement) {
  const intlProps: ComponentProps<typeof NextIntlClientProvider> = {
    locale: 'zh',
    timeZone: 'Asia/Shanghai',
    messages: messages as unknown as AbstractIntlMessages,
    children: node,
  }
  return renderToStaticMarkup(createElement(NextIntlClientProvider, intlProps))
}
describe('LLMStageStreamCard error rendering', () => {
  it('renders the error without any feedback action entry', () => {
    Reflect.set(globalThis, 'React', React)
    const html = renderWithIntl(
      createElement(LLMStageStreamCard, {
        title: '内容到剧本',
        stages: [{
          id: 'story_to_script',
          title: '内容到剧本',
          status: 'failed',
          progress: 0,
        }],
        activeStageId: 'story_to_script',
        outputText: '',
        errorMessage: 'Failed to fetch',
      }),
    )
    // Error text shows, but none of the feedback/copy entries (zh or en).
    expect(html).toContain('Failed to fetch')
    expect(html).not.toContain('复制错误详情')
    expect(html).not.toContain('打开问题反馈表单')
    expect(html).not.toContain('Copy error detail')
    expect(html).not.toContain('Open feedback form')
  })
  it('resolves analyze props progress keys without missing message errors', () => {
    Reflect.set(globalThis, 'React', React)
    // Raw i18n keys passed as titles must be resolved, never echoed back.
    const html = renderWithIntl(
      createElement(LLMStageStreamCard, {
        title: 'progress.streamStep.analyzeProps',
        stages: [{
          id: 'analyze_props',
          title: 'progress.streamStep.analyzeProps',
          subtitle: 'progress.streamStep.analyzeProps',
          status: 'processing',
          progress: 35,
        }],
        activeStageId: 'analyze_props',
        activeMessage: 'progress.streamStep.analyzeProps',
        outputText: '',
      }),
    )
    expect(html).toContain('道具分析')
    expect(html).not.toContain('progress.streamStep.analyzeProps')
    expect(html).not.toContain('MISSING_MESSAGE')
  })
})
@@ -0,0 +1,187 @@
import * as React from 'react'
import { createElement } from 'react'
import { renderToStaticMarkup } from 'react-dom/server'
import { describe, expect, it, vi } from 'vitest'
import { NextIntlClientProvider } from 'next-intl'
import type { AbstractIntlMessages } from 'next-intl'
import { AI_EDIT_BUTTON_CLASS } from '@/components/ui/ai-edit-style'
// Hoisted spies: the image-list mock records its props (the aspect-ratio
// assertion reads them back) and passes overlayActions through to the DOM.
const locationImageListMock = vi.hoisted(() => vi.fn((props: { overlayActions?: React.ReactNode }) => createElement('div', null, props.overlayActions ?? null)))
const uploadMutationMock = vi.hoisted(() => ({
  isPending: false,
  mutate: vi.fn(),
}))
vi.mock('@/lib/query/mutations', () => ({
  useUploadProjectLocationImage: () => uploadMutationMock,
}))
vi.mock('@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/assets/location-card/LocationCardHeader', () => ({
  default: () => createElement('div', null),
}))
vi.mock('@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/assets/location-card/LocationCardActions', () => ({
  default: () => createElement('div', null),
}))
vi.mock('@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/assets/location-card/LocationImageList', () => ({
  default: locationImageListMock,
}))
vi.mock('@/components/ui/icons', () => ({
  AppIcon: () => createElement('span', null),
}))
// The AI sparkles icon keeps its data-icon marker for the assertion below.
vi.mock('@/components/ui/icons/AISparklesIcon', () => ({
  default: (props: { className?: string }) => createElement('svg', { className: props.className, 'data-icon': 'ai-sparkles' }),
}))
vi.mock('@/components/task/TaskStatusInline', () => ({
  default: () => createElement('span', null),
}))
vi.mock('@/components/image-generation/ImageGenerationInlineCountButton', () => ({
  default: () => createElement('button', null),
}))
vi.mock('@/lib/image-generation/use-image-generation-count', () => ({
  useImageGenerationCount: () => ({
    count: 1,
    setCount: vi.fn(),
  }),
}))
vi.mock('@/lib/image-generation/count', () => ({
  getImageGenerationCountOptions: () => [{ value: 1, label: '1' }],
}))
vi.mock('@/lib/task/presentation', () => ({
  resolveTaskPresentationState: () => null,
}))
// zh messages for the image/location/prop action labels.
const messages = {
  assets: {
    image: {
      upload: '上传图片',
      uploadReplace: '上传替换图片',
      edit: '编辑图片',
      undo: '撤回',
      regenerateStuck: '重新生成',
    },
    location: {
      regenerateImage: '重新生成场景',
      edit: '编辑场景',
      delete: '删除场景',
    },
    prop: {
      regenerateImage: '重新生成道具',
      edit: '编辑道具',
      delete: '删除道具',
    },
  },
} as const
// Narrow the provider's prop surface for createElement usage in the tests.
const TestIntlProvider = NextIntlClientProvider as React.ComponentType<{
  locale: string
  messages: AbstractIntlMessages
  timeZone: string
  children?: React.ReactNode
}>
describe('LocationCard AI edit button', () => {
  it('uses the shared AI edit button style in single-image mode', async () => {
    locationImageListMock.mockClear()
    Reflect.set(globalThis, 'React', React)
    // Imported lazily so the vi.mock declarations above take effect first.
    const { default: LocationCard } = await import('@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/assets/LocationCard')
    const html = renderToStaticMarkup(
      createElement(
        TestIntlProvider,
        {
          locale: 'zh',
          messages: messages as unknown as AbstractIntlMessages,
          timeZone: 'Asia/Shanghai',
        },
        createElement(LocationCard, {
          location: {
            id: 'prop-1',
            name: '银质餐具',
            summary: '银质西式餐具套装',
            selectedImageId: 'prop-image-1',
            images: [
              {
                id: 'prop-image-1',
                imageIndex: 0,
                description: '银质餐具套装,包含刀叉与汤匙,金属光泽冷白',
                imageUrl: 'https://example.com/prop.png',
                previousImageUrl: null,
                previousDescription: null,
                isSelected: true,
              },
            ],
          },
          assetType: 'prop',
          onEdit: () => undefined,
          onDelete: () => undefined,
          onRegenerate: () => undefined,
          onGenerate: () => undefined,
          onImageClick: () => undefined,
          onImageEdit: () => undefined,
          projectId: 'project-1',
        }),
      ),
    )
    // Double quotes need no escaping inside a single-quoted string literal
    // (the previous '\"' escapes were redundant).
    expect(html).toContain('data-icon="ai-sparkles"')
    // Every class token of the shared AI edit style must be present.
    for (const token of AI_EDIT_BUTTON_CLASS.split(' ')) {
      expect(html).toContain(token)
    }
    // Prop cards get the 3:2 image slot.
    const firstCall = locationImageListMock.mock.calls[0]?.[0] as { aspectClassName?: string } | undefined
    expect(firstCall?.aspectClassName).toBe('aspect-[3/2]')
  })
  it('passes a square image slot to project location cards', async () => {
    locationImageListMock.mockClear()
    Reflect.set(globalThis, 'React', React)
    const { default: LocationCard } = await import('@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/assets/LocationCard')
    renderToStaticMarkup(
      createElement(
        TestIntlProvider,
        {
          locale: 'zh',
          messages: messages as unknown as AbstractIntlMessages,
          timeZone: 'Asia/Shanghai',
        },
        createElement(LocationCard, {
          location: {
            id: 'location-1',
            name: '餐厅',
            summary: '极简餐厅',
            selectedImageId: 'location-image-1',
            images: [
              {
                id: 'location-image-1',
                imageIndex: 0,
                description: '极简餐厅室内空间',
                imageUrl: 'https://example.com/location.png',
                previousImageUrl: null,
                previousDescription: null,
                isSelected: true,
              },
            ],
          },
          assetType: 'location',
          onEdit: () => undefined,
          onDelete: () => undefined,
          onRegenerate: () => undefined,
          onGenerate: () => undefined,
          onImageClick: () => undefined,
          onImageEdit: () => undefined,
          projectId: 'project-1',
        }),
      ),
    )
    // Location cards get the square image slot (vs 3:2 for props).
    const firstCall = locationImageListMock.mock.calls[0]?.[0] as { aspectClassName?: string } | undefined
    expect(firstCall?.aspectClassName).toBe('aspect-square')
  })
})
@@ -0,0 +1,81 @@
import * as React from 'react'
import { createElement } from 'react'
import type { ComponentProps, ReactElement } from 'react'
import { describe, expect, it, vi } from 'vitest'
import { renderToStaticMarkup } from 'react-dom/server'
import { NextIntlClientProvider } from 'next-intl'
import type { AbstractIntlMessages } from 'next-intl'
import { LocationCreationModal } from '@/components/shared/assets/LocationCreationModal'
// Stub every mutation hook the modal pulls from the query layer so static
// rendering stays pure; each hook exposes only an inert mutateAsync.
vi.mock('@/lib/query/hooks', () => ({
  useAiCreateProjectLocation: vi.fn(() => ({ mutateAsync: vi.fn() })),
  useAiDesignLocation: vi.fn(() => ({ mutateAsync: vi.fn() })),
  useCreateAssetHubLocation: vi.fn(() => ({ mutateAsync: vi.fn() })),
  useGenerateLocationImage: vi.fn(() => ({ mutateAsync: vi.fn() })),
  useCreateProjectLocation: vi.fn(() => ({ mutateAsync: vi.fn() })),
  useGenerateProjectLocationImage: vi.fn(() => ({ mutateAsync: vi.fn() })),
}))
// zh message bundle limited to the modal strings asserted in this file
// (form labels, AI design copy, footer actions, error texts).
const messages = {
  assetModal: {
    location: {
      title: '新建场景',
      name: '场景名称',
      namePlaceholder: '请输入场景名称',
      description: '场景描述',
      descPlaceholder: '请输入场景描述...',
    },
    artStyle: { title: '画面风格' },
    aiDesign: {
      title: 'AI 设计',
      placeholderLocation: '描述场景氛围和环境...',
      generating: '设计中...',
      generate: '生成',
      tip: '输入简单描述,AI 帮你生成详细设定',
    },
    common: {
      cancel: '取消',
      addOnlyLocation: '仅添加场景',
      addOnlyToAssetHubLocation: '仅添加场景到资产库',
      addAndGeneratePrefix: '添加并生成',
      generateCountSuffix: '张图片',
      selectGenerateCount: '选择生成数量',
      optional: '(可选)',
    },
    errors: {
      createFailed: '创建失败',
      aiDesignFailed: 'AI 设计失败',
      insufficientBalance: '账户余额不足',
    },
  },
} as const
/**
 * Renders `node` to static HTML wrapped in a zh-locale NextIntl provider,
 * mirroring the provider setup used at runtime.
 */
const renderWithIntl = (node: ReactElement) => {
  const props: ComponentProps<typeof NextIntlClientProvider> = {
    locale: 'zh',
    messages: messages as unknown as AbstractIntlMessages,
    timeZone: 'Asia/Shanghai',
    children: node,
  }
  const tree = createElement(NextIntlClientProvider, props)
  return renderToStaticMarkup(tree)
}
describe('LocationCreationModal', () => {
  it('renders add-only and add-and-generate actions in the fixed footer', () => {
    // The statically rendered tree reads a global React.
    Reflect.set(globalThis, 'React', React)
    const noop = () => undefined
    const html = renderWithIntl(
      createElement(LocationCreationModal, {
        mode: 'asset-hub',
        onClose: noop,
        onSuccess: noop,
      }),
    )
    // All three footer actions must be present in asset-hub mode.
    for (const label of ['仅添加场景到资产库', '添加并生成', '取消']) {
      expect(html).toContain(label)
    }
  })
})
@@ -0,0 +1,115 @@
import * as React from 'react'
import { createElement } from 'react'
import type { ComponentProps, ReactElement } from 'react'
import { describe, expect, it, vi } from 'vitest'
import { renderToStaticMarkup } from 'react-dom/server'
import { NextIntlClientProvider } from 'next-intl'
import type { AbstractIntlMessages } from 'next-intl'
import LocationSection from '@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/assets/LocationSection'
// Capture LocationCard props so the tests can assert on prop wiring without
// rendering the real card; hoisted so the vi.mock factories below can use them.
const locationCardMock = vi.hoisted(() => vi.fn((_props: unknown) => null))
const useProjectAssetsMock = vi.hoisted(() => vi.fn())
vi.mock('@/lib/query/hooks/useProjectAssets', () => ({
  useProjectAssets: (projectId: string | null) => useProjectAssetsMock(projectId),
}))
vi.mock('@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/assets/LocationCard', () => ({
  default: (props: unknown) => locationCardMock(props),
}))
// Icons are irrelevant to this wiring test; render nothing.
vi.mock('@/components/ui/icons', () => ({
  AppIcon: () => null,
}))
// zh messages for the section headers, counters and add buttons used by
// LocationSection when rendering under the intl provider below.
const messages = {
  assets: {
    stage: {
      locationAssets: '场景资产',
      locationCounts: '{count} 个场景',
      propAssets: '道具资产',
      propCounts: '{count} 个道具',
    },
    location: {
      add: '新建场景',
    },
    prop: {
      add: '新建道具',
    },
  },
} as const
/** Wraps `node` in the zh NextIntl provider and renders it to static markup. */
function renderWithIntl(node: ReactElement) {
  const intlProps: ComponentProps<typeof NextIntlClientProvider> = {
    locale: 'zh',
    timeZone: 'Asia/Shanghai',
    messages: messages as unknown as AbstractIntlMessages,
    children: node,
  }
  return renderToStaticMarkup(createElement(NextIntlClientProvider, intlProps))
}
// Verifies LocationSection forwards the onConfirmSelection callback to each
// prop card, using the captured props from locationCardMock.
describe('LocationSection prop confirm wiring', () => {
  it('passes the confirm-selection callback through to prop cards', () => {
    Reflect.set(globalThis, 'React', React)
    locationCardMock.mockClear()
    // Feed one prop asset with two candidate images through the mocked hook.
    useProjectAssetsMock.mockReturnValue({
      data: {
        characters: [],
        locations: [],
        props: [{
          id: 'prop-1',
          name: '青铜匕首',
          summary: '古旧短刃',
          selectedImageId: 'prop-image-2',
          images: [
            {
              id: 'prop-image-1',
              imageIndex: 0,
              description: '候选 1',
              imageUrl: 'https://example.com/prop-1.png',
              isSelected: false,
            },
            {
              id: 'prop-image-2',
              imageIndex: 1,
              description: '候选 2',
              imageUrl: 'https://example.com/prop-2.png',
              isSelected: true,
            },
          ],
        }],
      },
    })
    renderWithIntl(
      createElement(LocationSection, {
        projectId: 'project-1',
        assetType: 'prop',
        activeTaskKeys: new Set<string>(),
        onClearTaskKey: () => undefined,
        onRegisterTransientTaskKey: () => undefined,
        onAddLocation: () => undefined,
        onDeleteLocation: () => undefined,
        onEditLocation: () => undefined,
        handleGenerateImage: async () => undefined,
        onSelectImage: () => undefined,
        onConfirmSelection: () => undefined,
        onRegenerateSingle: async () => undefined,
        onRegenerateGroup: async () => undefined,
        onUndo: () => undefined,
        onImageClick: () => undefined,
        onImageEdit: () => undefined,
        onCopyFromGlobal: () => undefined,
        filterIds: null,
      }),
    )
    // The first card rendered must have received a callable onConfirmSelection.
    const firstCall = locationCardMock.mock.calls[0]?.[0] as { onConfirmSelection?: () => void } | undefined
    expect(firstCall).toBeDefined()
    expect(typeof firstCall?.onConfirmSelection).toBe('function')
  })
})
@@ -0,0 +1,72 @@
import * as React from 'react'
import { createElement } from 'react'
import { renderToStaticMarkup } from 'react-dom/server'
import { afterEach, describe, expect, it, vi } from 'vitest'
import LongTextDetectionPrompt from '@/components/story-input/LongTextDetectionPrompt'
// Shared state for the portal mock: tests assign `currentPortalTarget` (the
// fake document body) so the mock can label where the portal was mounted.
const portalMocks = vi.hoisted(() => {
  return {
    currentPortalTarget: null as unknown,
    createPortalMock: vi.fn((node: React.ReactNode, target: unknown) => {
      // Tag the wrapper so HTML assertions can see the portal destination.
      const targetLabel = target === portalMocks.currentPortalTarget ? 'body' : 'unknown'
      return createElement('div', { 'data-portal-target': targetLabel }, node)
    }),
  }
})
// Keep the real react-dom but intercept createPortal for static rendering.
vi.mock('react-dom', async () => {
  const actual = await vi.importActual<typeof import('react-dom')>('react-dom')
  return {
    ...actual,
    createPortal: portalMocks.createPortalMock,
  }
})
// Replace AppIcon with a span exposing the icon name for HTML assertions.
vi.mock('@/components/ui/icons', () => ({
  AppIcon: ({ name, className }: { name: string; className?: string }) =>
    createElement('span', { 'data-icon': name, className }),
}))
// Renders the prompt through the mocked portal and checks the markup layer,
// styling, and that the removed gradient-border wrapper is gone.
describe('LongTextDetectionPrompt', () => {
  afterEach(() => {
    vi.clearAllMocks()
    portalMocks.currentPortalTarget = null
    // Undo the globals installed per test so tests stay isolated.
    Reflect.deleteProperty(globalThis, 'React')
    Reflect.deleteProperty(globalThis, 'document')
  })
  it('renders through document.body at modal layer without the removed gradient border wrapper', () => {
    // Minimal fake document: only `body` is needed as the portal target.
    const fakeDocument = {
      body: { nodeName: 'BODY' },
    }
    Reflect.set(globalThis, 'React', React)
    Reflect.set(globalThis, 'document', fakeDocument)
    portalMocks.currentPortalTarget = fakeDocument.body
    const html = renderToStaticMarkup(
      createElement(LongTextDetectionPrompt, {
        open: true,
        copy: {
          title: '建议使用智能分集',
          description: '检测到文本较长',
          strongRecommend: '建议拆分',
          smartSplitLabel: '智能分集',
          smartSplitBadge: '推荐',
          continueLabel: '仍然单集创作',
          continueHint: '单集模式',
        },
        onClose: () => undefined,
        onSmartSplit: () => undefined,
        onContinue: () => undefined,
      }),
    )
    // Exactly one portal, targeted at the fake body.
    expect(portalMocks.createPortalMock).toHaveBeenCalledTimes(1)
    expect(portalMocks.createPortalMock.mock.calls[0]?.[1]).toBe(fakeDocument.body)
    expect(html).toContain('data-portal-target="body"')
    // Modal layering and border styling assertions on the raw HTML.
    expect(html).toContain('z-[120]')
    expect(html).toContain('border-[var(--glass-stroke-base)]')
    // The old gradient-border wrapper (1.5px padding shell) must not return.
    expect(html).not.toContain('p-[1.5px]')
  })
})
@@ -0,0 +1,29 @@
import { describe, expect, it } from 'vitest'
import { lockModalPageScroll } from '@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/storyboard/modal-scroll-lock'
describe('modal scroll lock', () => {
  it('locks page scroll while modal is open and restores previous styles on cleanup', () => {
    // Fake document exposing only the style slots the lock helper touches.
    const fakeDoc = {
      body: {
        style: {
          overflow: 'auto',
        },
      },
      documentElement: {
        style: {
          overflow: 'scroll',
        },
      },
    }
    const unlock = lockModalPageScroll(fakeDoc)
    // While locked, both scroll containers are forced to hidden.
    expect(fakeDoc.body.style.overflow).toBe('hidden')
    expect(fakeDoc.documentElement.style.overflow).toBe('hidden')
    unlock()
    // Cleanup restores whatever values were present before locking.
    expect(fakeDoc.body.style.overflow).toBe('auto')
    expect(fakeDoc.documentElement.style.overflow).toBe('scroll')
  })
})
@@ -0,0 +1,116 @@
import * as React from 'react'
import { createElement } from 'react'
import type { ComponentProps, ReactElement } from 'react'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { renderToStaticMarkup } from 'react-dom/server'
import { NextIntlClientProvider } from 'next-intl'
import type { AbstractIntlMessages } from 'next-intl'
import Navbar from '@/components/Navbar'
// Session state is swapped per test (signed-in vs signed-out).
const useSessionMock = vi.fn()
vi.mock('next-auth/react', () => ({
  useSession: () => useSessionMock(),
}))
// next/image renders a plain <img> so renderToStaticMarkup works.
vi.mock('next/image', () => ({
  default: ({ alt, ...props }: { alt: string } & Record<string, unknown>) => createElement('img', { alt, ...props }),
}))
vi.mock('@/components/LanguageSwitcher', () => ({
  default: () => createElement('div', null, 'LanguageSwitcher'),
}))
// Pin the release-update hook to a quiet "no update available" state.
vi.mock('@/hooks/common/useGithubReleaseUpdate', () => ({
  useGithubReleaseUpdate: () => ({
    currentVersion: '0.3.0',
    update: null,
    shouldPulse: false,
    showModal: false,
    openModal: () => undefined,
    dismissCurrentUpdate: () => undefined,
    checkNow: async () => undefined,
  }),
}))
// Locale-aware Link becomes a plain anchor; object hrefs collapse to pathname
// so href assertions below can match literal strings.
vi.mock('@/i18n/navigation', () => ({
  Link: ({
    href,
    children,
    ...props
  }: {
    href: string | { pathname: string }
    children: React.ReactNode
  } & Record<string, unknown>) => {
    const resolvedHref = typeof href === 'string' ? href : href.pathname
    return createElement('a', { href: resolvedHref, ...props }, children)
  },
}))
// zh messages for the nav entries and version/update copy the Navbar renders.
const messages = {
  nav: {
    workspace: '工作区',
    assetHub: '资产中心',
    profile: '设置中心',
    downloadLogs: '下载日志',
    signin: '登录',
    signup: '注册',
  },
  common: {
    appName: 'waoowaoo',
    betaVersion: 'Beta v{version}',
    updateNotice: {
      openDialog: '打开更新弹窗',
      updateTag: '更新',
      checkUpdate: '检查更新',
      upToDate: '已是最新版本',
    },
  },
} as const
/** Statically renders `node` under the zh NextIntl provider used by Navbar. */
const renderWithIntl = (node: ReactElement) => {
  const intlProps: ComponentProps<typeof NextIntlClientProvider> = {
    locale: 'zh',
    timeZone: 'Asia/Shanghai',
    messages: messages as unknown as AbstractIntlMessages,
    children: node,
  }
  return renderToStaticMarkup(createElement(NextIntlClientProvider, intlProps))
}
// The download-logs entry must only exist for authenticated sessions.
describe('Navbar download logs entry', () => {
  beforeEach(() => {
    useSessionMock.mockReset()
  })
  it('renders the download logs entry on the far-right action group for signed-in users', () => {
    Reflect.set(globalThis, 'React', React)
    useSessionMock.mockReturnValue({
      data: { user: { name: 'Earth' } },
      status: 'authenticated',
    })
    const html = renderWithIntl(createElement(Navbar))
    expect(html).toContain('下载日志')
    expect(html).toContain('href="/home"')
    expect(html).toContain('href="/api/admin/download-logs"')
    // A boolean `download` attribute serializes as download="" in static HTML.
    expect(html).toContain('download=""')
  })
  it('does not render the download logs entry for signed-out users', () => {
    Reflect.set(globalThis, 'React', React)
    useSessionMock.mockReturnValue({
      data: null,
      status: 'unauthenticated',
    })
    const html = renderWithIntl(createElement(Navbar))
    expect(html).not.toContain('下载日志')
    expect(html).not.toContain('/api/admin/download-logs')
  })
})
@@ -0,0 +1,107 @@
import * as React from 'react'
import { createElement } from 'react'
import { renderToStaticMarkup } from 'react-dom/server'
import { afterEach, describe, expect, it, vi } from 'vitest'
import { RatioSelector, StylePresetSelector, StyleSelector } from '@/components/selectors/RatioStyleSelectors'
// Portal mock shared by all selectors in this file; `currentPortalTarget` is
// set per test so the mock can label the mount target in the output HTML.
const portalMocks = vi.hoisted(() => {
  return {
    currentPortalTarget: null as unknown,
    createPortalMock: vi.fn((node: React.ReactNode, target: unknown) => {
      const targetLabel = target === portalMocks.currentPortalTarget ? 'body' : 'unknown'
      return createElement('div', { 'data-portal-target': targetLabel }, node)
    }),
  }
})
// Flip useState(false) initializers to true: the test relies on this so that
// dropdown panels that default to closed are rendered in the static markup.
vi.mock('react', async (importOriginal) => {
  const actual = await importOriginal<typeof import('react')>()
  return {
    ...actual,
    useState: <T,>(initialState: T | (() => T)) => {
      const resolvedInitialState = typeof initialState === 'function'
        ? (initialState as () => T)()
        : initialState
      if (resolvedInitialState === false) {
        return actual.useState(true as T)
      }
      return actual.useState(resolvedInitialState)
    },
  }
})
// Keep real react-dom but intercept createPortal for static rendering.
vi.mock('react-dom', async () => {
  const actual = await vi.importActual<typeof import('react-dom')>('react-dom')
  return {
    ...actual,
    createPortal: portalMocks.createPortalMock,
  }
})
// AppIcon becomes a span exposing the icon name for HTML assertions.
vi.mock('@/components/ui/icons', () => ({
  AppIcon: ({ name, className }: { name: string; className?: string }) =>
    createElement('span', { 'data-icon': name, className }),
}))
// Renders all three selectors at once (panels forced open by the useState
// mock above) and checks each panel portals into the fake document body.
describe('RatioStyleSelectors', () => {
  afterEach(() => {
    vi.clearAllMocks()
    portalMocks.currentPortalTarget = null
    // Remove the globals installed per test.
    Reflect.deleteProperty(globalThis, 'React')
    Reflect.deleteProperty(globalThis, 'document')
  })
  it('renders ratio, style, and style preset dropdown panels through a portal to document.body', () => {
    const fakeDocument = {
      body: { nodeName: 'BODY' },
    }
    Reflect.set(globalThis, 'React', React)
    portalMocks.currentPortalTarget = fakeDocument.body
    Reflect.set(globalThis, 'document', fakeDocument)
    const html = renderToStaticMarkup(
      createElement('div', null,
        createElement(RatioSelector, {
          value: '9:16',
          onChange: () => undefined,
          options: [
            { value: '9:16', label: '9:16' },
            { value: '16:9', label: '16:9' },
          ],
        }),
        createElement(StyleSelector, {
          value: 'realistic',
          onChange: () => undefined,
          options: [
            { value: 'realistic', label: '真人风格' },
            { value: 'american-comic', label: '美漫风格' },
          ],
        }),
        createElement(StylePresetSelector, {
          value: 'horror-suspense',
          onChange: () => undefined,
          options: [
            { value: 'horror-suspense', label: '恐怖悬疑', description: '压迫氛围' },
            { value: 'dark-noir', label: '暗黑黑色', description: '冷峻低照' },
          ],
        }),
      ),
    )
    // One portal per selector, all targeting the fake body.
    expect(portalMocks.createPortalMock).toHaveBeenCalledTimes(3)
    expect(portalMocks.createPortalMock.mock.calls[0]?.[1]).toBe(fakeDocument.body)
    expect(portalMocks.createPortalMock.mock.calls[1]?.[1]).toBe(fakeDocument.body)
    expect(portalMocks.createPortalMock.mock.calls[2]?.[1]).toBe(fakeDocument.body)
    expect(html).toContain('data-portal-target="body"')
    // Icon markers and option labels from each selector's open panel.
    expect(html).toContain('data-icon="sparklesAlt"')
    expect(html).toContain('data-icon="clapperboard"')
    expect(html).toContain('真人风格')
    expect(html).toContain('16:9')
    expect(html).toContain('恐怖悬疑')
    expect(html).toContain('压迫氛围')
  })
})
@@ -0,0 +1,29 @@
import * as React from 'react'
import { createElement } from 'react'
import { describe, expect, it } from 'vitest'
import { renderToStaticMarkup } from 'react-dom/server'
import { SegmentedControl } from '@/components/ui/SegmentedControl'
describe('SegmentedControl', () => {
  it('compact 布局 -> 输出左对齐的非拉伸结构', () => {
    // Static rendering of the control expects a global React.
    Reflect.set(globalThis, 'React', React)
    const options = [
      { value: 'all', label: '全部 (24)' },
      { value: 'character', label: '角色 (11)' },
      { value: 'location', label: '场景 (13)' },
      { value: 'prop', label: '道具 (0)' },
    ]
    const html = renderToStaticMarkup(
      createElement(SegmentedControl, {
        options,
        value: 'all',
        onChange: () => undefined,
        layout: 'compact',
      }),
    )
    // Compact layout renders an inline, content-sized track…
    expect(html).toContain('inline-block max-w-full')
    expect(html).toContain('inline-grid grid-flow-col auto-cols-[minmax(96px,max-content)]')
    // …and must not fall back to the stretched equal-column grid.
    expect(html).not.toContain('grid-template-columns:repeat(4,minmax(0,1fr))')
  })
})
@@ -0,0 +1,78 @@
import * as React from 'react'
import { createElement } from 'react'
import { renderToStaticMarkup } from 'react-dom/server'
import { describe, expect, it, vi } from 'vitest'
import StoryInputComposer from '@/components/story-input/StoryInputComposer'
// Replace each selector with a labelled div; getUsage is stripped from
// RatioSelector's props because it is not a valid DOM attribute.
vi.mock('@/components/selectors/RatioStyleSelectors', () => ({
  RatioSelector: ({
    getUsage: _getUsage,
    ...props
  }: Record<string, unknown> & { getUsage?: unknown }) => createElement('div', props, 'RatioSelector'),
  StyleSelector: (props: Record<string, unknown>) => createElement('div', props, 'StyleSelector'),
  StylePresetSelector: (props: Record<string, unknown>) => createElement('div', props, 'StylePresetSelector'),
}))
// Exercises the composer shell: textarea sizing, the three selector slots,
// and the optional top-right / footer / action slots.
describe('StoryInputComposer', () => {
  it('renders a shared composer shell with configurable textarea rows and shared controls', () => {
    Reflect.set(globalThis, 'React', React)
    const html = renderToStaticMarkup(
      createElement(StoryInputComposer, {
        value: '测试内容',
        onValueChange: () => undefined,
        placeholder: '请输入内容',
        minRows: 8,
        videoRatio: '9:16',
        onVideoRatioChange: () => undefined,
        ratioOptions: [{ value: '9:16', label: '9:16' }],
        artStyle: 'realistic',
        onArtStyleChange: () => undefined,
        styleOptions: [{ value: 'realistic', label: '真人风格' }],
        stylePresetValue: 'horror-suspense',
        onStylePresetChange: () => undefined,
        stylePresetOptions: [{ value: 'horror-suspense', label: '恐怖悬疑', description: '压迫氛围' }],
        topRight: createElement('span', null, '字数:4'),
        footer: createElement('p', null, '当前配置'),
        secondaryActions: createElement('button', { type: 'button' }, 'AI 帮我写'),
        primaryAction: createElement('button', { type: 'button' }, '开始创作'),
      }),
    )
    // minRows flows through to the textarea's rows attribute.
    expect(html).toContain('rows="8"')
    // The mocked selectors render their own names as markers.
    expect(html).toContain('RatioSelector')
    expect(html).toContain('StyleSelector')
    expect(html).toContain('StylePresetSelector')
    expect(html).toContain('字数:4')
    expect(html).toContain('当前配置')
    expect(html).toContain('AI 帮我写')
    expect(html).toContain('开始创作')
  })
  it('hides the style preset selector when no preset is enabled', () => {
    Reflect.set(globalThis, 'React', React)
    const html = renderToStaticMarkup(
      createElement(StoryInputComposer, {
        value: '测试内容',
        onValueChange: () => undefined,
        placeholder: '请输入内容',
        minRows: 8,
        videoRatio: '9:16',
        onVideoRatioChange: () => undefined,
        ratioOptions: [{ value: '9:16', label: '9:16' }],
        artStyle: 'realistic',
        onArtStyleChange: () => undefined,
        styleOptions: [{ value: 'realistic', label: '真人风格' }],
        stylePresetValue: '',
        onStylePresetChange: () => undefined,
        stylePresetOptions: [],
        primaryAction: createElement('button', { type: 'button' }, '开始创作'),
      }),
    )
    expect(html).toContain('RatioSelector')
    expect(html).toContain('StyleSelector')
    // Empty preset value + empty options must suppress the preset selector.
    expect(html).not.toContain('StylePresetSelector')
  })
})
@@ -0,0 +1,68 @@
import { describe, expect, it, vi } from 'vitest'
import {
DEFAULT_VOICE_SCHEME_COUNT,
MAX_VOICE_SCHEME_COUNT,
MIN_VOICE_SCHEME_COUNT,
generateVoiceDesignOptions,
normalizeVoiceSchemeCount,
} from '@/components/voice/voice-design-shared'
// Pure-logic tests for the voice design helpers: count clamping, option
// generation (including prompt/preview trimming), and invalid-response errors.
describe('voice-design-shared', () => {
  it('clamps scheme count into the supported range', () => {
    // Non-numeric input falls back to the default; numbers clamp to min/max.
    expect(normalizeVoiceSchemeCount(undefined)).toBe(DEFAULT_VOICE_SCHEME_COUNT)
    expect(normalizeVoiceSchemeCount('not-a-number')).toBe(DEFAULT_VOICE_SCHEME_COUNT)
    expect(normalizeVoiceSchemeCount(0)).toBe(MIN_VOICE_SCHEME_COUNT)
    expect(normalizeVoiceSchemeCount(99)).toBe(MAX_VOICE_SCHEME_COUNT)
    expect(normalizeVoiceSchemeCount('5')).toBe(5)
  })
  it('generates the requested number of voice options with default preview text fallback', async () => {
    // One queued response per expected design call (count '4' -> 4 calls).
    const onDesignVoice = vi
      .fn<(_: {
        voicePrompt: string
        previewText: string
        preferredName: string
        language: 'zh'
      }) => Promise<{ voiceId: string; audioBase64: string }>>()
      .mockResolvedValueOnce({ voiceId: 'voice-1', audioBase64: 'audio-1' })
      .mockResolvedValueOnce({ voiceId: 'voice-2', audioBase64: 'audio-2' })
      .mockResolvedValueOnce({ voiceId: 'voice-3', audioBase64: 'audio-3' })
      .mockResolvedValueOnce({ voiceId: 'voice-4', audioBase64: 'audio-4' })
    const result = await generateVoiceDesignOptions({
      count: '4',
      voicePrompt: ' 温柔女声 ',
      previewText: ' ',
      defaultPreviewText: '默认试听文案',
      onDesignVoice,
      createPreferredName: (index) => `preferred-${index + 1}`,
    })
    // Each option carries a data: URL built from its base64 audio.
    expect(result).toEqual([
      { voiceId: 'voice-1', audioBase64: 'audio-1', audioUrl: 'data:audio/wav;base64,audio-1' },
      { voiceId: 'voice-2', audioBase64: 'audio-2', audioUrl: 'data:audio/wav;base64,audio-2' },
      { voiceId: 'voice-3', audioBase64: 'audio-3', audioUrl: 'data:audio/wav;base64,audio-3' },
      { voiceId: 'voice-4', audioBase64: 'audio-4', audioUrl: 'data:audio/wav;base64,audio-4' },
    ])
    // Prompt is trimmed; a blank previewText falls back to the default copy.
    expect(onDesignVoice.mock.calls).toEqual([
      [{ voicePrompt: '温柔女声', previewText: '默认试听文案', preferredName: 'preferred-1', language: 'zh' }],
      [{ voicePrompt: '温柔女声', previewText: '默认试听文案', preferredName: 'preferred-2', language: 'zh' }],
      [{ voicePrompt: '温柔女声', previewText: '默认试听文案', preferredName: 'preferred-3', language: 'zh' }],
      [{ voicePrompt: '温柔女声', previewText: '默认试听文案', preferredName: 'preferred-4', language: 'zh' }],
    ])
  })
  it('fails explicitly when a designed voice is missing voiceId', async () => {
    const onDesignVoice = vi.fn(async () => ({ voiceId: '', audioBase64: 'audio-only' }))
    await expect(
      generateVoiceDesignOptions({
        count: 1,
        voicePrompt: '旁白',
        previewText: '测试',
        defaultPreviewText: '默认试听文案',
        onDesignVoice,
      }),
    ).rejects.toThrow('VOICE_DESIGN_INVALID_RESPONSE: missing voiceId')
  })
})
@@ -0,0 +1,64 @@
import * as React from 'react'
import { createElement } from 'react'
import { describe, expect, it, vi } from 'vitest'
import { renderToStaticMarkup } from 'react-dom/server'
import WorkspaceRunStreamConsoles from '@/app/[locale]/workspace/[projectId]/modes/novel-promotion/components/WorkspaceRunStreamConsoles'
// Translations echo their key so assertions can match on message keys.
vi.mock('next-intl', () => ({
  useTranslations: () => (key: string) => key,
}))
// The console card renders just its title, prefixed for easy matching.
vi.mock('@/components/llm-console/LLMStageStreamCard', () => ({
  __esModule: true,
  default: ({ title }: { title: string }) => createElement('section', null, `LLMStageStreamCard:${title}`),
}))
// Builds a storyToScriptStream-shaped state object; `overrides` patches
// individual fields on top of the recovered-running defaults.
function createStreamState(overrides?: Partial<React.ComponentProps<typeof WorkspaceRunStreamConsoles>['storyToScriptStream']>) {
  const defaults = {
    status: 'running' as const,
    isVisible: true,
    isRecoveredRunning: true,
    stages: [],
    selectedStep: null,
    activeStepId: null,
    outputText: '',
    activeMessage: '',
    overallProgress: 0,
    isRunning: false,
    errorMessage: '',
    stop: () => undefined,
    reset: () => undefined,
    selectStep: () => undefined,
    retryStep: async () => ({
      runId: 'run-1',
      status: 'running',
      summary: null,
      payload: null,
      errorMessage: '',
    }),
  }
  return { ...defaults, ...overrides }
}
describe('WorkspaceRunStreamConsoles', () => {
  it('shows fallback running console when a recovered run has no stages yet', () => {
    Reflect.set(globalThis, 'React', React)
    const html = renderToStaticMarkup(
      createElement(WorkspaceRunStreamConsoles, {
        // Recovered running stream with zero stages (the case under test).
        storyToScriptStream: createStreamState(),
        // The second console is idle and hidden.
        scriptToStoryboardStream: createStreamState({
          status: 'idle',
          isVisible: false,
          isRecoveredRunning: false,
        }),
        storyToScriptConsoleMinimized: false,
        scriptToStoryboardConsoleMinimized: true,
        onStoryToScriptMinimizedChange: () => undefined,
        onScriptToStoryboardMinimizedChange: () => undefined,
      }),
    )
    // The mocked card echoes its title, which here is the i18n key.
    expect(html).toContain('LLMStageStreamCard:runConsole.storyToScript')
  })
})
@@ -0,0 +1,105 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Model selection resolves to an openai-compatible image model WITHOUT a
// compatMediaTemplate — the condition these tests assert must fail hard.
const resolveModelSelectionMock = vi.hoisted(() =>
  vi.fn(async () => ({
    provider: 'openai-compatible:oa-1',
    modelId: 'gpt-image-1',
    modelKey: 'openai-compatible:oa-1::gpt-image-1',
    mediaType: 'image',
    compatMediaTemplate: undefined,
  })),
)
const getProviderConfigMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'openai-compatible:oa-1',
    name: 'OpenAI Compat',
    apiKey: 'oa-key',
    gatewayRoute: 'openai-compat' as const,
  })),
)
const resolveModelGatewayRouteMock = vi.hoisted(() => vi.fn(() => 'openai-compat'))
// Gateway call spies: the tests verify none of these are ever reached.
const generateImageViaOpenAICompatMock = vi.hoisted(() => vi.fn(async () => ({ success: true, imageUrl: 'image' })))
const generateVideoViaOpenAICompatMock = vi.hoisted(() => vi.fn(async () => ({ success: true, videoUrl: 'video' })))
const generateImageViaOpenAICompatTemplateMock = vi.hoisted(() => vi.fn(async () => ({ success: true, imageUrl: 'image' })))
const generateVideoViaOpenAICompatTemplateMock = vi.hoisted(() => vi.fn(async () => ({ success: true, videoUrl: 'video' })))
vi.mock('@/lib/api-config', () => ({
  resolveModelSelection: resolveModelSelectionMock,
  getProviderConfig: getProviderConfigMock,
  getProviderKey: (providerId: string) => providerId.split(':')[0] || providerId,
}))
vi.mock('@/lib/model-gateway', () => ({
  resolveModelGatewayRoute: resolveModelGatewayRouteMock,
  generateImageViaOpenAICompat: generateImageViaOpenAICompatMock,
  generateVideoViaOpenAICompat: generateVideoViaOpenAICompatMock,
  generateImageViaOpenAICompatTemplate: generateImageViaOpenAICompatTemplateMock,
  generateVideoViaOpenAICompatTemplate: generateVideoViaOpenAICompatTemplateMock,
}))
// Remaining generator/provider modules are stubbed out; they are irrelevant
// to the template-required guard under test.
vi.mock('@/lib/generators/factory', () => ({
  createImageGenerator: vi.fn(() => ({ generate: vi.fn() })),
  createVideoGenerator: vi.fn(() => ({ generate: vi.fn() })),
  createAudioGenerator: vi.fn(() => ({ generate: vi.fn() })),
}))
vi.mock('@/lib/providers/bailian', () => ({
  generateBailianImage: vi.fn(),
  generateBailianVideo: vi.fn(),
  generateBailianAudio: vi.fn(),
}))
vi.mock('@/lib/providers/siliconflow', () => ({
  generateSiliconFlowImage: vi.fn(),
  generateSiliconFlowVideo: vi.fn(),
  generateSiliconFlowAudio: vi.fn(),
}))
import { generateImage, generateVideo } from '@/lib/generator-api'
// When an openai-compatible media model has no compatMediaTemplate, the
// generator API must throw before touching any gateway function.
describe('generator-api requires compat media template for openai-compatible media', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // Re-arm return values cleared by clearAllMocks.
    resolveModelGatewayRouteMock.mockReturnValue('openai-compat')
    getProviderConfigMock.mockResolvedValue({
      id: 'openai-compatible:oa-1',
      name: 'OpenAI Compat',
      apiKey: 'oa-key',
      gatewayRoute: 'openai-compat',
    })
  })
  it('throws for image model without compatMediaTemplate', async () => {
    resolveModelSelectionMock.mockResolvedValueOnce({
      provider: 'openai-compatible:oa-1',
      modelId: 'gpt-image-1',
      modelKey: 'openai-compatible:oa-1::gpt-image-1',
      mediaType: 'image',
      compatMediaTemplate: undefined,
    })
    await expect(
      generateImage('user-1', 'openai-compatible:oa-1::gpt-image-1', 'draw cat'),
    ).rejects.toThrow('MODEL_COMPAT_MEDIA_TEMPLATE_REQUIRED')
    // Neither gateway path may be invoked when the template is missing.
    expect(generateImageViaOpenAICompatMock).not.toHaveBeenCalled()
    expect(generateImageViaOpenAICompatTemplateMock).not.toHaveBeenCalled()
  })
  it('throws for video model without compatMediaTemplate', async () => {
    resolveModelSelectionMock.mockResolvedValueOnce({
      provider: 'openai-compatible:oa-1',
      modelId: 'veo3.1',
      modelKey: 'openai-compatible:oa-1::veo3.1',
      mediaType: 'video',
      compatMediaTemplate: undefined,
    })
    await expect(
      generateVideo('user-1', 'openai-compatible:oa-1::veo3.1', 'https://example.com/a.png', { prompt: 'animate' }),
    ).rejects.toThrow('MODEL_COMPAT_MEDIA_TEMPLATE_REQUIRED')
    expect(generateVideoViaOpenAICompatMock).not.toHaveBeenCalled()
    expect(generateVideoViaOpenAICompatTemplateMock).not.toHaveBeenCalled()
  })
})
+301
View File
@@ -0,0 +1,301 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Default selection: an official Google image model; individual tests swap in
// other providers/routes via mockResolvedValueOnce / mockReturnValueOnce.
const resolveModelSelectionMock = vi.hoisted(() =>
  vi.fn<typeof import('@/lib/api-config').resolveModelSelection>(async () => ({
    provider: 'google',
    modelId: 'gemini-3.1',
    modelKey: 'google::gemini-3.1',
    mediaType: 'image',
  })),
)
const getProviderConfigMock = vi.hoisted(() =>
  vi.fn<typeof import('@/lib/api-config').getProviderConfig>(async () => ({
    id: 'google',
    name: 'Google',
    apiKey: 'google-key',
    apiMode: undefined,
    gatewayRoute: undefined,
  })),
)
// Distinct result markers per path so tests can tell which route actually ran.
const generateImageViaOpenAICompatMock = vi.hoisted(() => vi.fn(async () => ({ success: true, imageUrl: 'compat-image' })))
const generateVideoViaOpenAICompatMock = vi.hoisted(() => vi.fn(async () => ({ success: true, videoUrl: 'compat-video' })))
const generateImageViaOpenAICompatTemplateMock = vi.hoisted(() => vi.fn(async () => ({ success: true, imageUrl: 'compat-template-image' })))
const generateVideoViaOpenAICompatTemplateMock = vi.hoisted(() => vi.fn(async () => ({ success: true, videoUrl: 'compat-template-video' })))
const resolveModelGatewayRouteMock = vi.hoisted(() => vi.fn(() => 'official'))
const imageGeneratorGenerateMock = vi.hoisted(() => vi.fn(async () => ({ success: true, imageUrl: 'official-image' })))
const videoGeneratorGenerateMock = vi.hoisted(() => vi.fn(async () => ({ success: true, videoUrl: 'official-video' })))
const audioGeneratorGenerateMock = vi.hoisted(() => vi.fn(async () => ({ success: true, audioUrl: 'audio' })))
const createImageGeneratorMock = vi.hoisted(() => vi.fn(() => ({ generate: imageGeneratorGenerateMock })))
const createVideoGeneratorMock = vi.hoisted(() => vi.fn(() => ({ generate: videoGeneratorGenerateMock })))
const createAudioGeneratorMock = vi.hoisted(() => vi.fn(() => ({ generate: audioGeneratorGenerateMock })))
const generateBailianImageMock = vi.hoisted(() => vi.fn(async () => ({ success: true, imageUrl: 'bailian-image' })))
const generateBailianVideoMock = vi.hoisted(() => vi.fn(async () => ({ success: true, videoUrl: 'bailian-video' })))
const generateBailianAudioMock = vi.hoisted(() => vi.fn(async () => ({ success: true, audioUrl: 'bailian-audio' })))
const generateSiliconFlowImageMock = vi.hoisted(() => vi.fn(async () => ({ success: true, imageUrl: 'siliconflow-image' })))
const generateSiliconFlowVideoMock = vi.hoisted(() => vi.fn(async () => ({ success: true, videoUrl: 'siliconflow-video' })))
const generateSiliconFlowAudioMock = vi.hoisted(() => vi.fn(async () => ({ success: true, audioUrl: 'siliconflow-audio' })))
// Partial mocks: keep the real modules but override the resolution and
// gateway entry points with the spies above.
vi.mock('@/lib/api-config', async (importOriginal) => {
  const actual = await importOriginal<typeof import('@/lib/api-config')>()
  return {
    ...actual,
    resolveModelSelection: resolveModelSelectionMock,
    getProviderConfig: getProviderConfigMock,
  }
})
vi.mock('@/lib/model-gateway', async (importOriginal) => {
  const actual = await importOriginal<typeof import('@/lib/model-gateway')>()
  return {
    ...actual,
    generateImageViaOpenAICompat: generateImageViaOpenAICompatMock,
    generateVideoViaOpenAICompat: generateVideoViaOpenAICompatMock,
    generateImageViaOpenAICompatTemplate: generateImageViaOpenAICompatTemplateMock,
    generateVideoViaOpenAICompatTemplate: generateVideoViaOpenAICompatTemplateMock,
    resolveModelGatewayRoute: resolveModelGatewayRouteMock,
  }
})
vi.mock('@/lib/generators/factory', () => ({
  createImageGenerator: createImageGeneratorMock,
  createVideoGenerator: createVideoGeneratorMock,
  createAudioGenerator: createAudioGeneratorMock,
}))
vi.mock('@/lib/providers/bailian', () => ({
  generateBailianImage: generateBailianImageMock,
  generateBailianVideo: generateBailianVideoMock,
  generateBailianAudio: generateBailianAudioMock,
}))
vi.mock('@/lib/providers/siliconflow', () => ({
  generateSiliconFlowImage: generateSiliconFlowImageMock,
  generateSiliconFlowVideo: generateSiliconFlowVideoMock,
  generateSiliconFlowAudio: generateSiliconFlowAudioMock,
}))
import { generateAudio, generateImage, generateVideo } from '@/lib/generator-api'
// Routing matrix for the generator-api facade: depending on the gateway route
// resolved for the selected model, a request must reach exactly one backend —
// the OpenAI-compatible template gateway, a dedicated provider adapter
// (bailian/siliconflow), or the generic provider-generator factory — and the
// other paths must not be touched.
describe('generator-api gateway routing', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    // mockReset() also drops any leftover mockReturnValueOnce queue before the
    // default 'official' route is re-primed for each test.
    resolveModelGatewayRouteMock.mockReset()
    resolveModelGatewayRouteMock.mockReturnValue('official')
    // Baseline provider config; tests needing another provider override this
    // with mockResolvedValueOnce.
    getProviderConfigMock.mockResolvedValue({
      id: 'google',
      name: 'Google',
      apiKey: 'google-key',
      apiMode: undefined,
      gatewayRoute: undefined,
    })
  })
  // A compat media template plus the 'openai-compat' route must go through the
  // template gateway, bypassing the provider-generator factory entirely.
  it('routes openai-compatible image requests to openai-compat gateway', async () => {
    resolveModelSelectionMock.mockResolvedValueOnce({
      provider: 'openai-compatible:oa-1',
      modelId: 'gpt-image-1',
      modelKey: 'openai-compatible:oa-1::gpt-image-1',
      mediaType: 'image',
      compatMediaTemplate: {
        version: 1,
        mediaType: 'image',
        mode: 'sync',
        create: { method: 'POST', path: '/v1/images/generations' },
        response: { outputUrlPath: 'data[0].url' },
      },
    })
    resolveModelGatewayRouteMock.mockReturnValueOnce('openai-compat')
    const result = await generateImage('user-1', 'openai-compatible:oa-1::gpt-image-1', 'draw cat', {
      size: '1024x1024',
    })
    expect(generateImageViaOpenAICompatTemplateMock).toHaveBeenCalledTimes(1)
    expect(createImageGeneratorMock).not.toHaveBeenCalled()
    expect(result).toEqual({ success: true, imageUrl: 'compat-template-image' })
  })
  // 'official' route: the factory builds a provider-specific image generator.
  it('routes official image requests to provider generator', async () => {
    resolveModelSelectionMock.mockResolvedValueOnce({
      provider: 'google',
      modelId: 'imagen-4.0',
      modelKey: 'google::imagen-4.0',
      mediaType: 'image',
    })
    resolveModelGatewayRouteMock.mockReturnValueOnce('official')
    const result = await generateImage('user-1', 'google::imagen-4.0', 'draw house')
    expect(createImageGeneratorMock).toHaveBeenCalledWith('google', 'imagen-4.0')
    expect(generateImageViaOpenAICompatMock).not.toHaveBeenCalled()
    expect(result).toEqual({ success: true, imageUrl: 'official-image' })
  })
  // gemini-compatible providers with gatewayRoute 'official' stay on the
  // factory path (default route from beforeEach is used here).
  it('routes gemini-compatible image to official generator', async () => {
    resolveModelSelectionMock.mockResolvedValueOnce({
      provider: 'gemini-compatible:gm-1',
      modelId: 'gemini-2.5-flash-image-preview',
      modelKey: 'gemini-compatible:gm-1::gemini-2.5-flash-image-preview',
      mediaType: 'image',
    })
    getProviderConfigMock.mockResolvedValueOnce({
      id: 'gemini-compatible:gm-1',
      name: 'Gemini Compatible',
      apiKey: 'gm-key',
      baseUrl: 'https://gm.test',
      apiMode: 'gemini-sdk',
      gatewayRoute: 'official',
    })
    const result = await generateImage(
      'user-1',
      'gemini-compatible:gm-1::gemini-2.5-flash-image-preview',
      'draw cat',
      { aspectRatio: '3:4' },
    )
    expect(createImageGeneratorMock).toHaveBeenCalledWith('gemini-compatible:gm-1', 'gemini-2.5-flash-image-preview')
    expect(generateImageViaOpenAICompatMock).not.toHaveBeenCalled()
    expect(result).toEqual({ success: true, imageUrl: 'official-image' })
  })
  // Async video template + 'openai-compat' route -> template gateway.
  it('routes openai-compatible video requests to openai-compat gateway', async () => {
    resolveModelSelectionMock.mockResolvedValueOnce({
      provider: 'openai-compatible:oa-1',
      modelId: 'sora-2',
      modelKey: 'openai-compatible:oa-1::sora-2',
      mediaType: 'video',
      compatMediaTemplate: {
        version: 1,
        mediaType: 'video',
        mode: 'async',
        create: { method: 'POST', path: '/v1/videos/generations' },
        response: { taskIdPath: 'id' },
      },
    })
    resolveModelGatewayRouteMock.mockReturnValueOnce('openai-compat')
    const result = await generateVideo(
      'user-1',
      'openai-compatible:oa-1::sora-2',
      'https://example.com/source.png',
      { prompt: 'animate' },
    )
    expect(generateVideoViaOpenAICompatTemplateMock).toHaveBeenCalledTimes(1)
    expect(createVideoGeneratorMock).not.toHaveBeenCalled()
    expect(result).toEqual({ success: true, videoUrl: 'compat-template-video' })
  })
  // gemini-compatible video also goes through the factory on the official route.
  it('routes gemini-compatible video to official provider generator', async () => {
    resolveModelSelectionMock.mockResolvedValueOnce({
      provider: 'gemini-compatible:gm-1',
      modelId: 'veo-3.1-generate-preview',
      modelKey: 'gemini-compatible:gm-1::veo-3.1-generate-preview',
      mediaType: 'video',
    })
    resolveModelGatewayRouteMock.mockReturnValueOnce('official')
    const result = await generateVideo('user-1', 'gemini-compatible:gm-1::veo-3.1-generate-preview', 'https://example.com/source.png')
    expect(createVideoGeneratorMock).toHaveBeenCalledWith('gemini-compatible:gm-1')
    expect(generateVideoViaOpenAICompatMock).not.toHaveBeenCalled()
    expect(result).toEqual({ success: true, videoUrl: 'official-video' })
  })
  it('routes official video requests to provider generator', async () => {
    resolveModelSelectionMock.mockResolvedValueOnce({
      provider: 'fal',
      modelId: 'kling',
      modelKey: 'fal::kling',
      mediaType: 'video',
    })
    resolveModelGatewayRouteMock.mockReturnValueOnce('official')
    const result = await generateVideo('user-1', 'fal::kling', 'https://example.com/source.png')
    expect(createVideoGeneratorMock).toHaveBeenCalledWith('fal')
    expect(generateVideoViaOpenAICompatMock).not.toHaveBeenCalled()
    expect(result).toEqual({ success: true, videoUrl: 'official-video' })
  })
  // Audio has no gateway alternative; it always uses the factory.
  it('keeps audio generation on provider generator path', async () => {
    resolveModelSelectionMock.mockResolvedValueOnce({
      provider: 'fal',
      modelId: 'tts-1',
      modelKey: 'fal::tts-1',
      mediaType: 'audio',
    })
    const result = await generateAudio('user-1', 'fal::tts-1', 'hello')
    expect(createAudioGeneratorMock).toHaveBeenCalledWith('fal')
    expect(result).toEqual({ success: true, audioUrl: 'audio' })
  })
  // Bailian bypasses both the gateway and the factory via its own adapter.
  it('routes bailian image generation to official provider adapter', async () => {
    resolveModelSelectionMock.mockResolvedValueOnce({
      provider: 'bailian',
      modelId: 'wanx-image',
      modelKey: 'bailian::wanx-image',
      mediaType: 'image',
    })
    getProviderConfigMock.mockResolvedValueOnce({
      id: 'bailian',
      name: 'Bailian',
      apiKey: 'bl-key',
      gatewayRoute: 'official',
      apiMode: undefined,
    })
    const result = await generateImage('user-1', 'bailian::wanx-image', 'draw sky')
    expect(generateBailianImageMock).toHaveBeenCalledTimes(1)
    expect(generateImageViaOpenAICompatMock).not.toHaveBeenCalled()
    expect(createImageGeneratorMock).not.toHaveBeenCalled()
    expect(result).toEqual({ success: true, imageUrl: 'bailian-image' })
  })
  // SiliconFlow likewise uses its dedicated adapter for video.
  it('routes siliconflow video generation to official provider adapter', async () => {
    resolveModelSelectionMock.mockResolvedValueOnce({
      provider: 'siliconflow',
      modelId: 'sf-video',
      modelKey: 'siliconflow::sf-video',
      mediaType: 'video',
    })
    getProviderConfigMock.mockResolvedValueOnce({
      id: 'siliconflow',
      name: 'SiliconFlow',
      apiKey: 'sf-key',
      gatewayRoute: 'official',
      apiMode: undefined,
    })
    const result = await generateVideo('user-1', 'siliconflow::sf-video', 'https://example.com/source.png', {
      prompt: 'animate',
    })
    expect(generateSiliconFlowVideoMock).toHaveBeenCalledTimes(1)
    expect(generateVideoViaOpenAICompatMock).not.toHaveBeenCalled()
    expect(createVideoGeneratorMock).not.toHaveBeenCalled()
    expect(result).toEqual({ success: true, videoUrl: 'siliconflow-video' })
  })
  it('routes bailian audio generation to official provider adapter', async () => {
    resolveModelSelectionMock.mockResolvedValueOnce({
      provider: 'bailian',
      modelId: 'bailian-tts',
      modelKey: 'bailian::bailian-tts',
      mediaType: 'audio',
    })
    const result = await generateAudio('user-1', 'bailian::bailian-tts', 'hello')
    expect(generateBailianAudioMock).toHaveBeenCalledTimes(1)
    expect(createAudioGeneratorMock).not.toHaveBeenCalled()
    expect(result).toEqual({ success: true, audioUrl: 'bailian-audio' })
  })
})
+22
View File
@@ -0,0 +1,22 @@
import { describe, expect, it } from 'vitest'
import { createAudioGenerator, createImageGenerator, createVideoGenerator } from '@/lib/generators/factory'
import { GoogleVeoVideoGenerator } from '@/lib/generators/video/google'
import { OpenAICompatibleVideoGenerator } from '@/lib/generators/video/openai-compatible'
import { BailianAudioGenerator, BailianImageGenerator, BailianVideoGenerator, SiliconFlowAudioGenerator } from '@/lib/generators/official'
// The factory must map each provider id onto its concrete generator class.
describe('generator factory', () => {
  it('routes gemini-compatible video provider to Google video generator', () => {
    expect(createVideoGenerator('gemini-compatible:gm-1')).toBeInstanceOf(GoogleVeoVideoGenerator)
  })
  it('routes bailian official providers to official generators', () => {
    const image = createImageGenerator('bailian')
    const video = createVideoGenerator('bailian')
    const audio = createAudioGenerator('bailian')
    expect(image).toBeInstanceOf(BailianImageGenerator)
    expect(video).toBeInstanceOf(BailianVideoGenerator)
    expect(audio).toBeInstanceOf(BailianAudioGenerator)
  })
  it('routes siliconflow audio provider to official generator', () => {
    const audio = createAudioGenerator('siliconflow')
    expect(audio).toBeInstanceOf(SiliconFlowAudioGenerator)
  })
})
@@ -0,0 +1,94 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// vi.hoisted() lifts these factories above the vi.mock calls below, which
// vitest hoists to the top of the module before any import runs.
const apiConfigMock = vi.hoisted(() => ({
  getProviderConfig: vi.fn(async () => ({ apiKey: 'fal-key' })),
}))
const asyncSubmitMock = vi.hoisted(() => ({
  submitFalTask: vi.fn(async () => 'req_kling_1'),
}))
// Swap the config lookup and the async task submitter for the spies above.
vi.mock('@/lib/api-config', () => apiConfigMock)
vi.mock('@/lib/async-submit', () => asyncSubmitMock)
import { FalVideoGenerator } from '@/lib/generators/fal'
// One parameterized scenario per Kling model preset.
type KlingModelCase = {
  modelId: string // model id passed via options.modelId
  endpoint: string // fal endpoint the submission is expected to hit
  // v2.5 sends the seed image as `image_url`; v3 presets use `start_image_url`.
  imageField: 'image_url' | 'start_image_url'
}
const KLING_MODEL_CASES: KlingModelCase[] = [
  {
    modelId: 'fal-ai/kling-video/v2.5-turbo/pro/image-to-video',
    endpoint: 'fal-ai/kling-video/v2.5-turbo/pro/image-to-video',
    imageField: 'image_url',
  },
  {
    modelId: 'fal-ai/kling-video/v3/standard/image-to-video',
    endpoint: 'fal-ai/kling-video/v3/standard/image-to-video',
    imageField: 'start_image_url',
  },
  {
    modelId: 'fal-ai/kling-video/v3/pro/image-to-video',
    endpoint: 'fal-ai/kling-video/v3/pro/image-to-video',
    imageField: 'start_image_url',
  },
]
// Each Kling preset must hit its own fal endpoint with the payload shape that
// endpoint expects (v2.5 and v3 differ in seed-image field and extras).
describe('FalVideoGenerator kling presets', () => {
  beforeEach(() => {
    // Re-prime the hoisted mocks after wiping call history.
    vi.clearAllMocks()
    apiConfigMock.getProviderConfig.mockResolvedValue({ apiKey: 'fal-key' })
    asyncSubmitMock.submitFalTask.mockResolvedValue('req_kling_1')
  })
  it.each(KLING_MODEL_CASES)('submits $modelId to expected endpoint and payload', async (modelCase) => {
    const { modelId, endpoint, imageField } = modelCase
    const generator = new FalVideoGenerator()
    const result = await generator.generate({
      userId: 'user-1',
      imageUrl: 'https://example.com/start.png',
      prompt: 'test prompt',
      options: { modelId, duration: 5, aspectRatio: '16:9' },
    })
    // Async submission metadata is surfaced back to the caller.
    expect(result.success).toBe(true)
    expect(result.endpoint).toBe(endpoint)
    expect(result.requestId).toBe('req_kling_1')
    expect(result.externalId).toBe(`FAL:VIDEO:${endpoint}:req_kling_1`)
    expect(apiConfigMock.getProviderConfig).toHaveBeenCalledWith('user-1', 'fal')
    const calls = asyncSubmitMock.submitFalTask.mock.calls as Array<[string, Record<string, unknown>, string]>
    const firstCall = calls.at(0)
    expect(firstCall).toBeTruthy()
    if (!firstCall) {
      throw new Error('submitFalTask should be called')
    }
    const [calledEndpoint, payload, apiKey] = firstCall
    expect(calledEndpoint).toBe(endpoint)
    expect(apiKey).toBe('fal-key')
    expect(payload.prompt).toBe('test prompt')
    // Durations are serialized as strings for fal.
    expect(payload.duration).toBe('5')
    if (imageField === 'image_url') {
      // v2.5 payload: image_url plus negative prompt and cfg scale.
      expect(payload.image_url).toBe('https://example.com/start.png')
      expect(payload.start_image_url).toBeUndefined()
      expect(payload.negative_prompt).toBe('blur, distort, and low quality')
      expect(payload.cfg_scale).toBe(0.5)
      return
    }
    // v3 payload: start_image_url plus aspect ratio and muted audio.
    expect(payload.start_image_url).toBe('https://example.com/start.png')
    expect(payload.image_url).toBeUndefined()
    expect(payload.aspect_ratio).toBe('16:9')
    expect(payload.generate_audio).toBe(false)
  })
})
@@ -0,0 +1,264 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Hoisted spies so the vi.mock factories (which vitest hoists above imports)
// can reference them safely.
const googleGenerateContentMock = vi.hoisted(() => vi.fn())
const getProviderConfigMock = vi.hoisted(() => vi.fn())
const getImageBase64CachedMock = vi.hoisted(() => vi.fn(async () => 'data:image/png;base64,UkVG'))
const arkImageGenerationMock = vi.hoisted(() => vi.fn())
const normalizeToBase64ForGenerationMock = vi.hoisted(() => vi.fn(async () => 'UkVG'))
// Minimal stand-in for the Google GenAI SDK: only the surface the generators
// touch here (models.generateContent plus the harm enums) is provided.
vi.mock('@google/genai', () => ({
  GoogleGenAI: class GoogleGenAI {
    models = {
      generateContent: googleGenerateContentMock,
    }
  },
  HarmCategory: {
    HARM_CATEGORY_HARASSMENT: 'HARM_CATEGORY_HARASSMENT',
    HARM_CATEGORY_HATE_SPEECH: 'HARM_CATEGORY_HATE_SPEECH',
    HARM_CATEGORY_SEXUALLY_EXPLICIT: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
    HARM_CATEGORY_DANGEROUS_CONTENT: 'HARM_CATEGORY_DANGEROUS_CONTENT',
  },
  HarmBlockThreshold: {
    BLOCK_NONE: 'BLOCK_NONE',
  },
}))
vi.mock('@/lib/api-config', () => ({
  getProviderConfig: getProviderConfigMock,
}))
vi.mock('@/lib/image-cache', () => ({
  getImageBase64Cached: getImageBase64CachedMock,
}))
vi.mock('@/lib/ark-api', () => ({
  arkImageGeneration: arkImageGenerationMock,
}))
vi.mock('@/lib/media/outbound-image', () => ({
  normalizeToBase64ForGeneration: normalizeToBase64ForGenerationMock,
}))
import { ArkSeedreamGenerator } from '@/lib/generators/ark'
import { GeminiCompatibleImageGenerator } from '@/lib/generators/image/gemini-compatible'
import { GoogleGeminiImageGenerator } from '@/lib/generators/image/google'
// Smoke tests for the concrete image generators: each test asserts the exact
// request payload sent to the underlying SDK/API and the normalized result
// shape returned to callers.
describe('image provider smoke tests', () => {
  beforeEach(() => {
    // Clears call histories only; the hoisted default implementations of the
    // cache/normalize helpers remain in effect.
    vi.clearAllMocks()
  })
  // Official Google Gemini text-to-image: inlineData must be wrapped into a
  // data URL and the aspect ratio forwarded via imageConfig.
  it('Google Gemini 官方文生图可用 -> 返回 data URL', async () => {
    getProviderConfigMock.mockResolvedValueOnce({
      id: 'google',
      apiKey: 'google-key',
    })
    googleGenerateContentMock.mockResolvedValueOnce({
      candidates: [
        {
          content: {
            parts: [
              {
                inlineData: {
                  mimeType: 'image/png',
                  data: 'R09PR0xF',
                },
              },
            ],
          },
        },
      ],
    })
    const generator = new GoogleGeminiImageGenerator('gemini-3-pro-image-preview')
    const result = await generator.generate({
      userId: 'user-1',
      prompt: 'draw a mountain',
      options: {
        aspectRatio: '3:4',
      },
    })
    expect(result).toEqual({
      success: true,
      imageBase64: 'R09PR0xF',
      imageUrl: 'data:image/png;base64,R09PR0xF',
    })
    expect(googleGenerateContentMock).toHaveBeenCalledWith({
      model: 'gemini-3-pro-image-preview',
      contents: [{ parts: [{ text: 'draw a mountain' }] }],
      config: expect.objectContaining({
        responseModalities: ['TEXT', 'IMAGE'],
        imageConfig: { aspectRatio: '3:4' },
      }),
    })
  })
  // ARK Seedream image-to-image: reference images are normalized to base64 and
  // the 3:4 aspect ratio is translated into an explicit pixel size.
  it('Seedream 图生图可用 -> 返回 ARK 图片 URL', async () => {
    getProviderConfigMock.mockResolvedValueOnce({
      id: 'ark',
      apiKey: 'ark-key',
    })
    arkImageGenerationMock.mockResolvedValueOnce({
      data: [{ url: 'https://seedream.test/image.png' }],
    })
    const generator = new ArkSeedreamGenerator()
    const result = await generator.generate({
      userId: 'user-1',
      prompt: 'refine this style',
      referenceImages: ['https://example.com/ref.png'],
      options: {
        modelId: 'doubao-seedream-4-5-251128',
        aspectRatio: '3:4',
      },
    })
    expect(result).toEqual({
      success: true,
      imageUrl: 'https://seedream.test/image.png',
    })
    expect(arkImageGenerationMock).toHaveBeenCalledWith({
      model: 'doubao-seedream-4-5-251128',
      prompt: 'refine this style',
      sequential_image_generation: 'disabled',
      response_format: 'url',
      stream: false,
      watermark: false,
      size: '3544x4728',
      image: ['UkVG'],
    }, {
      apiKey: 'ark-key',
      logPrefix: '[ARK Image]',
    })
  })
  // Multi-image responses: first URL stays in imageUrl for backward
  // compatibility, full list goes into imageUrls.
  it('Seedream 返回多图时 -> 同时返回 imageUrl 和 imageUrls', async () => {
    getProviderConfigMock.mockResolvedValueOnce({
      id: 'ark',
      apiKey: 'ark-key',
    })
    arkImageGenerationMock.mockResolvedValueOnce({
      data: [
        { url: 'https://seedream.test/image-1.png' },
        { url: 'https://seedream.test/image-2.png' },
      ],
    })
    const generator = new ArkSeedreamGenerator()
    const result = await generator.generate({
      userId: 'user-1',
      prompt: 'refine this style',
      referenceImages: ['https://example.com/ref.png'],
      options: {
        modelId: 'doubao-seedream-4-5-251128',
        aspectRatio: '3:4',
      },
    })
    expect(result).toEqual({
      success: true,
      imageUrl: 'https://seedream.test/image-1.png',
      imageUrls: ['https://seedream.test/image-1.png', 'https://seedream.test/image-2.png'],
    })
  })
  // Gemini-compatible gateway speaking the Gemini SDK protocol directly.
  it('Gemini 兼容层文生图可用 -> 直连 Gemini SDK 协议返回图片', async () => {
    getProviderConfigMock.mockResolvedValueOnce({
      id: 'gemini-compatible:gm-1',
      apiKey: 'gm-key',
      baseUrl: 'https://gm.test',
    })
    googleGenerateContentMock.mockResolvedValueOnce({
      candidates: [
        {
          content: {
            parts: [
              {
                inlineData: {
                  mimeType: 'image/webp',
                  data: 'R01fVEVYVA==',
                },
              },
            ],
          },
        },
      ],
    })
    const generator = new GeminiCompatibleImageGenerator('gemini-2.5-flash-image-preview', 'gemini-compatible:gm-1')
    const result = await generator.generate({
      userId: 'user-1',
      prompt: 'draw a cat',
      options: {
        aspectRatio: '1:1',
      },
    })
    expect(result).toEqual({
      success: true,
      imageBase64: 'R01fVEVYVA==',
      imageUrl: 'data:image/webp;base64,R01fVEVYVA==',
    })
    expect(googleGenerateContentMock).toHaveBeenCalledWith({
      model: 'gemini-2.5-flash-image-preview',
      contents: [{ parts: [{ text: 'draw a cat' }] }],
      config: expect.objectContaining({
        responseModalities: ['TEXT', 'IMAGE'],
        imageConfig: { aspectRatio: '1:1' },
      }),
    })
  })
  // Image-to-image on the compatible layer: the reference image must be
  // normalized (mocked to 'UkVG') and injected as inlineData before the prompt.
  it('Gemini 兼容层图生图可用 -> 参考图会注入 inlineData', async () => {
    getProviderConfigMock.mockResolvedValueOnce({
      id: 'gemini-compatible:gm-1',
      apiKey: 'gm-key',
      baseUrl: 'https://gm.test',
    })
    googleGenerateContentMock.mockResolvedValueOnce({
      candidates: [
        {
          content: {
            parts: [
              {
                inlineData: {
                  mimeType: 'image/png',
                  data: 'R01fSTJJPQ==',
                },
              },
            ],
          },
        },
      ],
    })
    const generator = new GeminiCompatibleImageGenerator('gemini-2.5-flash-image-preview', 'gemini-compatible:gm-1')
    const result = await generator.generate({
      userId: 'user-1',
      prompt: 'restyle this portrait',
      referenceImages: ['/api/files/ref-image'],
      options: {
        resolution: '2K',
      },
    })
    expect(result).toEqual({
      success: true,
      imageBase64: 'R01fSTJJPQ==',
      imageUrl: 'data:image/png;base64,R01fSTJJPQ==',
    })
    const call = googleGenerateContentMock.mock.calls[0]
    expect(call).toBeTruthy()
    if (!call) {
      throw new Error('Gemini generateContent should be called')
    }
    const content = call[0] as {
      contents: Array<{ parts: Array<{ inlineData?: { mimeType: string; data: string }; text?: string }> }>
      config: { imageConfig?: { imageSize?: string } }
    }
    // Reference image first, prompt text second; resolution maps to imageSize.
    expect(content.contents[0].parts[0].inlineData).toEqual({ mimeType: 'image/png', data: 'UkVG' })
    expect(content.contents[0].parts[1].text).toBe('restyle this portrait')
    expect(content.config.imageConfig).toEqual({ imageSize: '2K' })
  })
})
@@ -0,0 +1,122 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Shared spy state for the mocked OpenAI SDK; hoisted so the vi.mock factory
// (hoisted above imports by vitest) can reference it.
const openAIState = vi.hoisted(() => ({
  generate: vi.fn(),
  edit: vi.fn(),
  toFile: vi.fn(async () => ({ name: 'mock-file' })),
}))
const getProviderConfigMock = vi.hoisted(() => vi.fn(async () => ({
  id: 'openai-compatible:oa-1',
  apiKey: 'oa-key',
  baseUrl: 'https://oa.test/v1',
})))
const getImageBase64CachedMock = vi.hoisted(() => vi.fn(async () => 'data:image/png;base64,QQ=='))
// Only the surface the image generator uses: images.generate/edit and toFile.
vi.mock('openai', () => ({
  default: class OpenAI {
    images = {
      generate: openAIState.generate,
      edit: openAIState.edit,
    }
  },
  toFile: openAIState.toFile,
}))
vi.mock('@/lib/api-config', () => ({
  getProviderConfig: getProviderConfigMock,
}))
vi.mock('@/lib/image-cache', () => ({
  getImageBase64Cached: getImageBase64CachedMock,
}))
import { OpenAICompatibleImageGenerator } from '@/lib/generators/image/openai-compatible'
// Contract tests for the OpenAI-compatible image generator: official
// images.generate / images.edit payload shapes, plus explicit failure on
// unsupported option values.
describe('OpenAICompatibleImageGenerator', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    getProviderConfigMock.mockResolvedValue({
      id: 'openai-compatible:oa-1',
      apiKey: 'oa-key',
      baseUrl: 'https://oa.test/v1',
    })
  })
  it('uses official images.generate payload parameters', async () => {
    openAIState.generate.mockResolvedValueOnce({
      data: [{ b64_json: 'YmFzZTY0' }],
    })
    const generator = new OpenAICompatibleImageGenerator('gpt-image-1', 'openai-compatible:oa-1')
    const result = await generator.generate({
      userId: 'user-1',
      prompt: 'draw a lighthouse',
      options: {
        size: '1024x1024',
        quality: 'high',
        outputFormat: 'png',
        responseFormat: 'b64_json',
      },
    })
    expect(result.success).toBe(true)
    expect(result.imageBase64).toBe('YmFzZTY0')
    // b64 payload is wrapped into a data URL for callers.
    expect(result.imageUrl).toBe('data:image/png;base64,YmFzZTY0')
    // camelCase options must be mapped onto the official snake_case params.
    expect(openAIState.generate).toHaveBeenCalledWith({
      model: 'gpt-image-1',
      prompt: 'draw a lighthouse',
      response_format: 'b64_json',
      output_format: 'png',
      quality: 'high',
      size: '1024x1024',
    })
  })
  // Reference images switch the generator onto the images.edit path.
  it('uses official images.edit payload when reference images are provided', async () => {
    openAIState.edit.mockResolvedValueOnce({
      data: [{ b64_json: 'ZWRpdA==' }],
    })
    const generator = new OpenAICompatibleImageGenerator('gpt-image-1', 'openai-compatible:oa-1')
    const result = await generator.generate({
      userId: 'user-1',
      prompt: 'edit this image',
      referenceImages: ['data:image/png;base64,QQ=='],
      options: {
        quality: 'medium',
      },
    })
    expect(result.success).toBe(true)
    // Each reference image is converted to an upload via toFile.
    expect(openAIState.toFile).toHaveBeenCalledTimes(1)
    const call = openAIState.edit.mock.calls[0]
    expect(call).toBeTruthy()
    if (!call) {
      throw new Error('images.edit should be called')
    }
    expect(call[0]).toMatchObject({
      model: 'gpt-image-1',
      prompt: 'edit this image',
      response_format: 'b64_json',
      quality: 'medium',
    })
    expect(Array.isArray((call[0] as { image?: unknown }).image)).toBe(true)
  })
  // Invalid option values should fail fast with a stable error code instead of
  // being silently forwarded upstream.
  it('fails explicitly on unsupported option values', async () => {
    const generator = new OpenAICompatibleImageGenerator('gpt-image-1', 'openai-compatible:oa-1')
    const result = await generator.generate({
      userId: 'user-1',
      prompt: 'draw',
      options: {
        quality: 'ultra',
      },
    })
    expect(result.success).toBe(false)
    expect(result.error).toContain('OPENAI_COMPAT_IMAGE_OPTION_UNSUPPORTED')
  })
})
@@ -0,0 +1,166 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Shared spy state for the mocked OpenAI SDK (videos.create + toFile); hoisted
// so the vi.mock factory below can reference it after vitest hoisting.
const openAIState = vi.hoisted(() => ({
  create: vi.fn(),
  toFile: vi.fn(async () => ({ name: 'reference-file' })),
}))
const getProviderConfigMock = vi.hoisted(() => vi.fn(async () => ({
  id: 'openai-compatible:oa-1',
  apiKey: 'oa-key',
  baseUrl: 'https://oa.test/v1',
})))
const normalizeToBase64ForGenerationMock = vi.hoisted(() => vi.fn(async () => 'data:image/png;base64,QQ=='))
vi.mock('openai', () => ({
  default: class OpenAI {
    videos = {
      create: openAIState.create,
    }
  },
  toFile: openAIState.toFile,
}))
vi.mock('@/lib/api-config', () => ({
  getProviderConfig: getProviderConfigMock,
}))
vi.mock('@/lib/media/outbound-image', () => ({
  normalizeToBase64ForGeneration: normalizeToBase64ForGenerationMock,
}))
import { OpenAICompatibleVideoGenerator } from '@/lib/generators/video/openai-compatible'
// Contract tests for the OpenAI-compatible video generator: videos.create
// payload mapping (duration/size), externalId encoding, custom model ids, and
// explicit rejection of unsupported aspect ratios.
describe('OpenAICompatibleVideoGenerator', () => {
  beforeEach(() => {
    vi.clearAllMocks()
    getProviderConfigMock.mockResolvedValue({
      id: 'openai-compatible:oa-1',
      apiKey: 'oa-key',
      baseUrl: 'https://oa.test/v1',
    })
  })
  it('submits official videos.create payload and returns OPENAI externalId', async () => {
    openAIState.create.mockResolvedValueOnce({ id: 'vid_123' })
    const generator = new OpenAICompatibleVideoGenerator('openai-compatible:oa-1')
    const result = await generator.generate({
      userId: 'user-1',
      imageUrl: 'https://example.com/seed.png',
      prompt: 'animate this character',
      options: {
        modelId: 'sora-2',
        duration: 8,
        resolution: '720p',
        aspectRatio: '16:9',
      },
    })
    expect(result.success).toBe(true)
    // Video generation is asynchronous: the caller gets a task id, not a URL.
    expect(result.async).toBe(true)
    expect(result.requestId).toBe('vid_123')
    // The provider id is base64url-encoded into the externalId so it can be
    // recovered later when polling.
    const expectedProviderToken = Buffer.from('openai-compatible:oa-1', 'utf8').toString('base64url')
    expect(result.externalId).toBe(`OPENAI:VIDEO:${expectedProviderToken}:vid_123`)
    const createCall = openAIState.create.mock.calls[0]
    expect(createCall).toBeTruthy()
    if (!createCall) {
      throw new Error('videos.create should be called')
    }
    // duration is stringified; 720p at 16:9 maps to the 1280x720 size.
    expect(createCall[0]).toMatchObject({
      prompt: 'animate this character',
      model: 'sora-2',
      seconds: '8',
      size: '1280x720',
    })
    // The seed image is attached as input_reference.
    expect((createCall[0] as { input_reference?: unknown }).input_reference).toBeDefined()
  })
  // Gateways may expose arbitrary model ids; they must pass through unchanged.
  it('allows custom model ids for openai-compatible gateways', async () => {
    openAIState.create.mockResolvedValueOnce({ id: 'vid_custom' })
    const generator = new OpenAICompatibleVideoGenerator('openai-compatible:oa-1')
    const result = await generator.generate({
      userId: 'user-1',
      imageUrl: 'https://example.com/seed.png',
      prompt: 'animate',
      options: {
        modelId: 'veo_3_1-fast-4K',
      },
    })
    expect(result.success).toBe(true)
    const createCall = openAIState.create.mock.calls.at(0)
    expect(createCall).toBeTruthy()
    if (!createCall) {
      throw new Error('videos.create should be called')
    }
    expect((createCall[0] as { model?: string }).model).toBe('veo_3_1-fast-4K')
  })
  it('maps 3:2 to landscape size explicitly', async () => {
    openAIState.create.mockResolvedValueOnce({ id: 'vid_32' })
    const generator = new OpenAICompatibleVideoGenerator('openai-compatible:oa-1')
    const result = await generator.generate({
      userId: 'user-1',
      imageUrl: 'https://example.com/seed.png',
      prompt: 'animate',
      options: {
        resolution: '1080p',
        aspectRatio: '3:2',
      },
    })
    expect(result.success).toBe(true)
    const createCall = openAIState.create.mock.calls.at(0)
    expect(createCall).toBeTruthy()
    if (!createCall) {
      throw new Error('videos.create should be called')
    }
    expect((createCall[0] as { size?: string }).size).toBe('1792x1024')
  })
  it('maps 2:3 to portrait size explicitly', async () => {
    openAIState.create.mockResolvedValueOnce({ id: 'vid_23' })
    const generator = new OpenAICompatibleVideoGenerator('openai-compatible:oa-1')
    const result = await generator.generate({
      userId: 'user-1',
      imageUrl: 'https://example.com/seed.png',
      prompt: 'animate',
      options: {
        resolution: '720p',
        aspectRatio: '2:3',
      },
    })
    expect(result.success).toBe(true)
    const createCall = openAIState.create.mock.calls.at(0)
    expect(createCall).toBeTruthy()
    if (!createCall) {
      throw new Error('videos.create should be called')
    }
    expect((createCall[0] as { size?: string }).size).toBe('720x1280')
  })
  // Unknown ratios must be rejected with a stable error code, not guessed.
  it('fails explicitly on unsupported aspect ratios', async () => {
    const generator = new OpenAICompatibleVideoGenerator('openai-compatible:oa-1')
    const result = await generator.generate({
      userId: 'user-1',
      imageUrl: 'https://example.com/seed.png',
      prompt: 'animate',
      options: {
        resolution: '720p',
        aspectRatio: '5:4',
      },
    })
    expect(result.success).toBe(false)
    expect(result.error).toContain('OPENAI_COMPAT_VIDEO_ASPECT_RATIO_UNSUPPORTED')
  })
})
@@ -0,0 +1,53 @@
import { describe, expect, it } from 'vitest'
import {
API_HANDLER_ALLOWLIST,
PUBLIC_ROUTE_ALLOWLIST,
inspectRouteContract,
} from '../../../scripts/guards/api-route-contract-guard.mjs'
// Guard: API route files must wrap handlers in apiHandler and perform explicit
// auth unless explicitly allowlisted as public/framework-managed.
describe('api route contract guard', () => {
  it('allows explicit public and framework-managed exceptions', () => {
    expect(API_HANDLER_ALLOWLIST.has('src/app/api/auth/[...nextauth]/route.ts')).toBe(true)
    expect(PUBLIC_ROUTE_ALLOWLIST.has('src/app/api/system/boot-id/route.ts')).toBe(true)
    const publicRouteViolations = inspectRouteContract(
      'src/app/api/system/boot-id/route.ts',
      'export async function GET() { return Response.json({ bootId: "x" }) }',
    )
    expect(publicRouteViolations).toEqual([])
  })
  it('passes protected routes that use apiHandler and explicit auth', () => {
    const compliantRoute = `
      import { requireUserAuth } from '@/lib/api-auth'
      import { apiHandler } from '@/lib/api-errors'
      export const GET = apiHandler(async () => {
        await requireUserAuth()
        return Response.json({ ok: true })
      })
    `
    expect(inspectRouteContract('src/app/api/user/secure/route.ts', compliantRoute)).toEqual([])
  })
  it('flags protected routes that skip apiHandler or auth', () => {
    const withoutApiHandler = `
      import { requireUserAuth } from '@/lib/api-auth'
      export async function GET() {
        await requireUserAuth()
        return Response.json({ ok: true })
      }
    `
    const withoutAuth = `
      import { apiHandler } from '@/lib/api-errors'
      export const GET = apiHandler(async () => Response.json({ ok: true }))
    `
    expect(inspectRouteContract('src/app/api/user/secure/route.ts', withoutApiHandler)).toEqual([
      'src/app/api/user/secure/route.ts missing apiHandler wrapper',
    ])
    expect(inspectRouteContract('src/app/api/user/secure/route.ts', withoutAuth)).toEqual([
      'src/app/api/user/secure/route.ts missing requireUserAuth/requireProjectAuth/requireProjectAuthLight',
    ])
  })
})
@@ -0,0 +1,29 @@
import { describe, expect, it } from 'vitest'
import { inspectChangedFiles } from '../../../scripts/guards/changed-file-test-impact-guard.mjs'
// Guard that couples production changes to matching test updates.
describe('changed-file-test-impact-guard', () => {
  it('requires api changes to be paired with contract, system, or regression tests', () => {
    const result = inspectChangedFiles([
      'src/app/api/novel-promotion/[projectId]/generate-image/route.ts',
    ])
    expect(result).toEqual([
      'api: changing src/app/api/** requires a matching contract, system, or regression test change; sources=src/app/api/novel-promotion/[projectId]/generate-image/route.ts',
    ])
  })
  it('accepts worker changes when system tests are updated together', () => {
    expect(
      inspectChangedFiles([
        'src/lib/workers/image.worker.ts',
        'tests/system/generate-image.system.test.ts',
      ]),
    ).toEqual([])
  })
  it('accepts provider changes when provider contract coverage is updated', () => {
    expect(
      inspectChangedFiles([
        'src/lib/model-gateway/openai-compat/image.ts',
        'tests/unit/model-gateway/openai-compat-template-image-output-urls.test.ts',
      ]),
    ).toEqual([])
  })
})
@@ -0,0 +1,53 @@
import { describe, expect, it } from 'vitest'
import {
NORMALIZATION_HELPER_ALLOWLIST,
inspectImageReferenceNormalization,
} from '../../../scripts/guards/image-reference-normalization-guard.mjs'
// Guard: worker handlers must normalize reference images before passing them
// to generation, unless the file is an allowlisted shared helper.
describe('image reference normalization guard', () => {
  it('allows shared helper exceptions explicitly', () => {
    const sharedHelperPath = 'src/lib/workers/handlers/image-task-handler-shared.ts'
    expect(NORMALIZATION_HELPER_ALLOWLIST.has(sharedHelperPath)).toBe(true)
    const violations = inspectImageReferenceNormalization(
      sharedHelperPath,
      'resolveImageSourceFromGeneration(job, { options: params.options })\nreferenceImages?: string[]',
    )
    expect(violations).toEqual([])
  })
  it('passes handlers that normalize reference images before generation', () => {
    const compliantHandler = `
      import { normalizeReferenceImagesForGeneration } from '@/lib/media/outbound-image'
      async function run() {
        const normalizedRefs = await normalizeReferenceImagesForGeneration(refs)
        return await resolveImageSourceFromGeneration(job, {
          options: {
            referenceImages: normalizedRefs,
          },
        })
      }
    `
    const violations = inspectImageReferenceNormalization('src/lib/workers/handlers/panel-image-task-handler.ts', compliantHandler)
    expect(violations).toEqual([])
  })
  it('flags handlers that send referenceImages without normalization markers', () => {
    const nonCompliantHandler = `
      async function run() {
        return await resolveImageSourceFromGeneration(job, {
          options: {
            referenceImages: refs,
          },
        })
      }
    `
    const violations = inspectImageReferenceNormalization('src/lib/workers/handlers/bad-handler.ts', nonCompliantHandler)
    expect(violations).toEqual([
      'src/lib/workers/handlers/bad-handler.ts uses resolveImageSourceFromGeneration with referenceImages but does not reference normalizeReferenceImagesForGeneration/normalizeToBase64ForGeneration/generateProjectLabeledImageToStorage/generateCleanImageToStorage',
    ])
  })
})
@@ -0,0 +1,43 @@
import { describe, expect, it } from 'vitest'
import { inspectTaskSubmitCompensation } from '../../../scripts/guards/task-submit-compensation-guard.mjs'
// Guard: any route that persists data before submitTask must carry an explicit
// rollback/compensation marker for the failure path.
describe('task submit compensation guard', () => {
  it('passes routes that create data before submitTask and define rollback handling', () => {
    const routeWithRollback = `
      async function rollbackCreatedRecord() {}
      export const POST = apiHandler(async () => {
        await prisma.panel.create({ data: {} })
        try {
          return await submitTask({})
        } catch (error) {
          await rollbackCreatedRecord()
          throw error
        }
      })
    `
    const violations = inspectTaskSubmitCompensation('src/app/api/novel-promotion/[projectId]/panel-variant/route.ts', routeWithRollback)
    expect(violations).toEqual([])
  })
  it('ignores routes that do not combine create and submitTask', () => {
    expect(inspectTaskSubmitCompensation('src/app/api/user/api-config/route.ts', 'await submitTask({})')).toEqual([])
    expect(inspectTaskSubmitCompensation('src/app/api/projects/route.ts', 'await prisma.project.create({ data: {} })')).toEqual([])
  })
  it('flags routes that create data before submitTask without compensation marker', () => {
    const routeWithoutRollback = `
      export const POST = apiHandler(async () => {
        await prisma.panel.create({ data: {} })
        return await submitTask({})
      })
    `
    const violations = inspectTaskSubmitCompensation('src/app/api/example/route.ts', routeWithoutRollback)
    expect(violations).toEqual([
      'src/app/api/example/route.ts creates data before submitTask without explicit rollback/compensation marker',
    ])
  })
})
+58
View File
@@ -0,0 +1,58 @@
import { afterEach, describe, expect, it, vi } from 'vitest'
import { apiFetch } from '@/lib/api-fetch'
// apiFetch should attach an Accept-Language header derived from the current
// locale, but only for internal /api requests, and never when the caller set
// the header explicitly.
describe('apiFetch locale header injection', () => {
  // Keep a reference to the real fetch so each test can restore it.
  const originalFetch = globalThis.fetch
  afterEach(() => {
    globalThis.fetch = originalFetch
    vi.unstubAllGlobals()
    vi.clearAllMocks()
  })
  it('injects Accept-Language for internal /api requests', async () => {
    const fetchMock = vi.fn<typeof fetch>().mockResolvedValue(new Response(null, { status: 204 }))
    globalThis.fetch = fetchMock
    await apiFetch('/api/tasks?status=running', { method: 'GET' })
    const init = fetchMock.mock.calls[0]?.[1]
    const headers = new Headers(init?.headers)
    // 'zh' is presumably apiFetch's fallback locale when no window/pathname
    // is available — confirm against its implementation.
    expect(headers.get('Accept-Language')).toBe('zh')
  })
  it('uses pathname locale and does not override explicit Accept-Language', async () => {
    // Simulate a client currently on the /en locale path.
    vi.stubGlobal('window', {
      location: {
        pathname: '/en/workspace',
      },
    })
    const fetchMock = vi.fn<typeof fetch>().mockResolvedValue(new Response(null, { status: 204 }))
    globalThis.fetch = fetchMock
    await apiFetch('/api/projects', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Accept-Language': 'ja',
      },
      body: JSON.stringify({ ok: true }),
    })
    const init = fetchMock.mock.calls[0]?.[1]
    const headers = new Headers(init?.headers)
    // The caller's explicit header wins over the pathname locale.
    expect(headers.get('Accept-Language')).toBe('ja')
  })
  it('does not inject locale header for non-internal URLs', async () => {
    const fetchMock = vi.fn<typeof fetch>().mockResolvedValue(new Response(null, { status: 204 }))
    globalThis.fetch = fetchMock
    await apiFetch('https://example.com/health', { method: 'GET' })
    const init = fetchMock.mock.calls[0]?.[1]
    const headers = new Headers(init?.headers)
    expect(headers.has('Accept-Language')).toBe(false)
  })
})
+185
View File
@@ -0,0 +1,185 @@
import { describe, expect, it } from 'vitest'
import { safeParseJson, safeParseJsonObject, safeParseJsonArray } from '@/lib/json-repair'
// ─── safeParseJson ───────────────────────────────────────────────────
// safeParseJson: plain JSON parses directly; typical LLM artifacts (markdown
// fences in either case, trailing commas, single quotes, surrounding prose)
// are recovered via jsonrepair.
describe('safeParseJson', () => {
  it('正常 JSON 字符串 -> 直接解析成功', () => {
    const result = safeParseJson('{"name":"孙悟空","age":500}')
    expect(result).toEqual({ name: '孙悟空', age: 500 })
  })
  it('包含 markdown 代码块 -> 剥离后解析成功', () => {
    const input = '```json\n{"key":"value"}\n```'
    const result = safeParseJson(input)
    expect(result).toEqual({ key: 'value' })
  })
  it('包含大写 JSON 标记的 markdown 代码块 -> 剥离后解析成功', () => {
    const input = '```JSON\n{"key":"value"}\n```'
    const result = safeParseJson(input)
    expect(result).toEqual({ key: 'value' })
  })
  it('尾部逗号 -> jsonrepair 修复后解析成功', () => {
    const input = '{"a":1,"b":2,}'
    const result = safeParseJson(input)
    expect(result).toEqual({ a: 1, b: 2 })
  })
  it('单引号包裹字符串 -> jsonrepair 修复后解析成功', () => {
    const input = "{'name':'张三','age':25}"
    const result = safeParseJson(input)
    expect(result).toEqual({ name: '张三', age: 25 })
  })
  it('JSON 前后有多余文字 -> jsonrepair 修复后解析成功', () => {
    const input = '以下是分析结果:\n{"result":"success"}\n以上是所有内容。'
    const result = safeParseJson(input)
    expect(result).toEqual({ result: 'success' })
  })
  it('完全无效内容(无任何 JSON 结构字符)-> jsonrepair 将其视为字符串', () => {
    // jsonrepair turns bare plain text into a JSON string value.
    const result = safeParseJson('这不是JSON')
    expect(result).toBe('这不是JSON')
  })
})
// ─── safeParseJsonObject ─────────────────────────────────────────────
// safeParseJsonObject: like safeParseJson, but the parsed value must be an
// object — arrays and scalars raise 'Expected JSON object'.
describe('safeParseJsonObject', () => {
  it('正常 JSON 对象 -> 返回对象', () => {
    const result = safeParseJsonObject('{"characters":[],"locations":[]}')
    expect(result).toEqual({ characters: [], locations: [] })
  })
  it('markdown 包裹的 JSON 对象 -> 剥离后返回对象', () => {
    const input = '```json\n{"episodes":[{"number":1}]}\n```'
    const result = safeParseJsonObject(input)
    expect(result).toHaveProperty('episodes')
    expect((result.episodes as unknown[])[0]).toEqual({ number: 1 })
  })
  it('包含中文角引号「」的内容 -> 正常解析保留', () => {
    const input = '{"lines":"孙悟空怒道,「一个冒牌货,也敢拦你孙爷爷的路!」"}'
    const result = safeParseJsonObject(input)
    expect(result.lines).toBe('孙悟空怒道,「一个冒牌货,也敢拦你孙爷爷的路!」')
  })
  it('LLM 输出数组而非对象 -> 抛出 Expected JSON object 错误', () => {
    expect(() => safeParseJsonObject('[1,2,3]')).toThrow('Expected JSON object')
  })
  it('尾部逗号 + markdown 包裹 -> 修复后返回正确对象', () => {
    const input = '```json\n{"a":1,"b":"hello",}\n```'
    const result = safeParseJsonObject(input)
    expect(result).toEqual({ a: 1, b: 'hello' })
  })
})
// ─── safeParseJsonArray ──────────────────────────────────────────────
// safeParseJsonArray: accepts a bare array, or an object wrapping an array
// (via explicit fallbackKey or auto-discovery of the first array field);
// non-object elements are filtered out; anything else throws
// 'Expected JSON array'.
describe('safeParseJsonArray', () => {
  it('正常 JSON 数组 -> 返回对象数组', () => {
    const input = '[{"id":1,"name":"角色A"},{"id":2,"name":"角色B"}]'
    const result = safeParseJsonArray(input)
    expect(result).toHaveLength(2)
    expect(result[0]).toEqual({ id: 1, name: '角色A' })
    expect(result[1]).toEqual({ id: 2, name: '角色B' })
  })
  it('对象包裹数组 + fallbackKey -> 提取内部数组', () => {
    const input = '{"clips":[{"id":1},{"id":2}]}'
    const result = safeParseJsonArray(input, 'clips')
    expect(result).toHaveLength(2)
    expect(result[0]).toEqual({ id: 1 })
  })
  it('对象包裹数组 + 无 fallbackKey -> 自动发现第一个数组字段', () => {
    const input = '{"episodes":[{"number":1},{"number":2}]}'
    const result = safeParseJsonArray(input)
    expect(result).toHaveLength(2)
    expect(result[0]).toEqual({ number: 1 })
  })
  it('markdown 包裹 + 尾部逗号 -> 修复后返回正确数组', () => {
    const input = '```json\n[{"a":1},{"b":2},]\n```'
    const result = safeParseJsonArray(input)
    expect(result).toHaveLength(2)
    expect(result[0]).toEqual({ a: 1 })
    expect(result[1]).toEqual({ b: 2 })
  })
  it('过滤非对象元素(数字、字符串等)-> 只保留对象', () => {
    const input = '[{"valid":true}, 42, "string", null, {"also":true}]'
    const result = safeParseJsonArray(input)
    expect(result).toHaveLength(2)
    expect(result[0]).toEqual({ valid: true })
    expect(result[1]).toEqual({ also: true })
  })
  it('空数组 -> 返回空数组', () => {
    const result = safeParseJsonArray('[]')
    expect(result).toHaveLength(0)
  })
  it('非数组非对象 -> 抛出错误', () => {
    expect(() => safeParseJsonArray('"just a string"')).toThrow('Expected JSON array')
  })
  it('对象不含数组字段 -> 抛出错误', () => {
    expect(() => safeParseJsonArray('{"key":"value"}')).toThrow('Expected JSON array')
  })
})
// ─── 真实 LLM 畸形输出回归测试 ───────────────────────────────────────
// Regression tests reproducing real malformed LLM JSON outputs that previously
// broke downstream parsing.
describe('LLM 畸形 JSON 输出回归测试', () => {
  it('中文弯引号嵌套在 JSON 值中 -> jsonrepair 修复成功', () => {
    // Typical scenario behind the "Invalid clip JSON format" failure.
    const llmOutput = '```json\n[{"description":"孙悟空怒道,\\u201c一个冒牌货!\\u201d"}]\n```'
    const result = safeParseJsonArray(llmOutput)
    expect(result).toHaveLength(1)
    expect(result[0].description).toContain('孙悟空')
  })
  it('LLM 输出前后带解释文字 -> 提取并解析 JSON', () => {
    const llmOutput = `好的,以下是分析结果:
{"locations":[{"name":"客厅_白天","summary":"主角居住的客厅"}]}
以上是所有场景分析。`
    const result = safeParseJsonObject(llmOutput)
    expect(result.locations).toBeDefined()
    const locations = result.locations as unknown[]
    expect(locations).toHaveLength(1)
  })
  it('使用「」角引号的台词内容 -> 正确解析不破坏 JSON', () => {
    // The reworked prompt asks the LLM to use 「」 instead of quote marks.
    const llmOutput = '[{"speaker":"孙悟空","content":"「你竟敢拦我的路!」","emotionStrength":0.4}]'
    const result = safeParseJsonArray(llmOutput)
    expect(result).toHaveLength(1)
    expect(result[0].speaker).toBe('孙悟空')
    expect(result[0].content).toBe('「你竟敢拦我的路!」')
    expect(result[0].emotionStrength).toBe(0.4)
  })
  it('带控制字符的 JSON -> jsonrepair 修复成功', () => {
    // NOTE(review): this literal contains an escaped \n (already valid JSON),
    // not a raw control character — the test name overstates what is covered.
    const llmOutput = '{"text":"第一行\\n第二行","count":2}'
    const result = safeParseJsonObject(llmOutput)
    expect(result.text).toBe('第一行\n第二行')
    expect(result.count).toBe(2)
  })
  it('clips 包裹在对象中 -> 正确提取', () => {
    // Common LLM output shape seen in clips-build.
    const llmOutput = '{"clips":[{"id":"clip_1","startText":"从前"},{"id":"clip_2","startText":"后来"}]}'
    const result = safeParseJsonArray(llmOutput, 'clips')
    expect(result).toHaveLength(2)
    expect(result[0].id).toBe('clip_1')
    expect(result[1].startText).toBe('后来')
  })
})
@@ -0,0 +1,25 @@
import { describe, expect, it } from 'vitest'
import { splitStructuredOutput } from '@/components/llm-console/LLMStageStreamCard'
// splitStructuredOutput must route <think>-tagged text into the reasoning
// section — both when the tag is closed and when it is still open mid-stream.
describe('LLMStageStreamCard structured output parsing', () => {
  it('moves think-tagged text from final block into reasoning', () => {
    const rawOutput = `【思考过程】
已有思考
【最终结果】
<think>追加思考</think>
{"locations":[]}`
    const result = splitStructuredOutput(rawOutput)
    expect(result.reasoning).toContain('已有思考')
    expect(result.reasoning).toContain('追加思考')
    expect(result.finalText).toBe('{"locations":[]}')
  })
  it('handles unmatched think opening tag during streaming', () => {
    const streamingOutput = `【最终结果】
<think>流式中的思考还没结束`
    const result = splitStructuredOutput(streamingOutput)
    expect(result.reasoning).toBe('流式中的思考还没结束')
    expect(result.finalText).toBe('')
  })
})
+63
View File
@@ -0,0 +1,63 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
// worker.progress.stream log actions must be suppressed entirely, while other
// actions still reach console.log as a single JSON line. The logging module is
// re-imported per test (vi.resetModules) so it re-reads the env configuration.
describe('logging core suppression', () => {
  let originalLogLevel: string | undefined
  let originalUnifiedEnabled: string | undefined
  beforeEach(() => {
    // Force a fresh module instance so LOG_LEVEL/LOG_UNIFIED_ENABLED apply.
    vi.resetModules()
    originalLogLevel = process.env.LOG_LEVEL
    originalUnifiedEnabled = process.env.LOG_UNIFIED_ENABLED
    process.env.LOG_LEVEL = 'INFO'
    process.env.LOG_UNIFIED_ENABLED = 'true'
  })
  afterEach(() => {
    // Restore each env var exactly: delete when it was originally unset.
    if (originalLogLevel === undefined) {
      delete process.env.LOG_LEVEL
    } else {
      process.env.LOG_LEVEL = originalLogLevel
    }
    if (originalUnifiedEnabled === undefined) {
      delete process.env.LOG_UNIFIED_ENABLED
    } else {
      process.env.LOG_UNIFIED_ENABLED = originalUnifiedEnabled
    }
    vi.restoreAllMocks()
  })
  it('suppresses worker.progress.stream logs', async () => {
    const consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => undefined)
    const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => undefined)
    const { createScopedLogger } = await import('@/lib/logging/core')
    const logger = createScopedLogger({ module: 'worker.waoowaoo-text' })
    logger.info({
      action: 'worker.progress.stream',
      message: 'worker stream chunk',
      details: {
        kind: 'text',
        seq: 1,
      },
    })
    // Neither stdout nor stderr should receive anything for this action.
    expect(consoleLogSpy).not.toHaveBeenCalled()
    expect(consoleErrorSpy).not.toHaveBeenCalled()
  })
  it('keeps non-suppressed logs', async () => {
    const consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => undefined)
    const { createScopedLogger } = await import('@/lib/logging/core')
    const logger = createScopedLogger({ module: 'worker.waoowaoo-text' })
    logger.info({
      action: 'worker.progress',
      message: 'worker progress update',
    })
    expect(consoleLogSpy).toHaveBeenCalledTimes(1)
    // The emitted line is JSON — parse it to verify the structured payload.
    const payload = JSON.parse(String(consoleLogSpy.mock.calls[0]?.[0])) as { action?: string; message?: string }
    expect(payload.action).toBe('worker.progress')
    expect(payload.message).toBe('worker progress update')
  })
})
@@ -0,0 +1,89 @@
import { describe, expect, it } from 'vitest'
import {
migrateGatewayRoutePayload,
migrateProviderEntry,
} from '@/lib/migrations/gateway-route-openai-compat'
// Migration of persisted gateway-route provider entries:
// - openai-compatible 'litellm' routes become 'openai-compat'
// - gemini-compatible entries are forced onto gemini-sdk + official route
// - any other provider using 'openai-compat' falls back to 'official'
// plus payload-level JSON validation and aggregate summary counters.
describe('gateway-route openai-compat migration', () => {
  it('migrates openai-compatible litellm route to openai-compat', () => {
    const result = migrateProviderEntry({
      id: 'openai-compatible:oa-1',
      gatewayRoute: 'litellm',
    })
    expect(result.changed).toBe(true)
    expect(result.next).toMatchObject({
      id: 'openai-compatible:oa-1',
      gatewayRoute: 'openai-compat',
    })
    expect(result.summary.routeLitellmToOpenaiCompat).toBe(1)
  })
  it('forces gemini-compatible to gemini-sdk + official route', () => {
    const result = migrateProviderEntry({
      id: 'gemini-compatible:gm-1',
      apiMode: 'openai-official',
      gatewayRoute: 'openai-compat',
    })
    expect(result.changed).toBe(true)
    expect(result.next).toMatchObject({
      id: 'gemini-compatible:gm-1',
      apiMode: 'gemini-sdk',
      gatewayRoute: 'official',
    })
    // Both corrections must be counted individually in the summary.
    expect(result.summary.geminiApiModeCorrected).toBe(1)
    expect(result.summary.routeForcedOfficial).toBe(1)
  })
  it('forces non-openai-compatible compat routes to official', () => {
    const result = migrateProviderEntry({
      id: 'openrouter',
      gatewayRoute: 'openai-compat',
    })
    expect(result.changed).toBe(true)
    expect(result.next).toMatchObject({
      id: 'openrouter',
      gatewayRoute: 'official',
    })
    expect(result.summary.routeForcedOfficial).toBe(1)
  })
  it('returns invalid status for malformed payload json', () => {
    const result = migrateGatewayRoutePayload('{bad-json')
    expect(result.status).toBe('invalid')
    expect(result.summary.invalidPayload).toBe(true)
  })
  it('migrates mixed provider payload and reports aggregate stats', () => {
    // Three providers: one litellm rewrite, one gemini correction, one no-op.
    const result = migrateGatewayRoutePayload(JSON.stringify([
      {
        id: 'openai-compatible:oa-1',
        gatewayRoute: 'litellm',
      },
      {
        id: 'gemini-compatible:gm-1',
        apiMode: 'openai-official',
        gatewayRoute: 'openai-compat',
      },
      {
        id: 'google',
        gatewayRoute: 'official',
      },
    ]))
    expect(result.status).toBe('ok')
    expect(result.changed).toBe(true)
    expect(result.summary.providersScanned).toBe(3)
    expect(result.summary.providersChanged).toBe(2)
    expect(result.summary.routeLitellmToOpenaiCompat).toBe(1)
    expect(result.summary.routeForcedOfficial).toBe(1)
    expect(result.summary.geminiApiModeCorrected).toBe(1)
    const nextPayload = JSON.parse(result.nextRaw || '[]') as Array<Record<string, unknown>>
    expect(nextPayload[0]?.gatewayRoute).toBe('openai-compat')
    expect(nextPayload[1]?.apiMode).toBe('gemini-sdk')
    expect(nextPayload[1]?.gatewayRoute).toBe('official')
  })
})
@@ -0,0 +1,55 @@
import { describe, expect, it } from 'vitest'
import {
addCharacterPromptSuffix,
addPropPromptSuffix,
CHARACTER_PROMPT_SUFFIX,
PROP_PROMPT_SUFFIX,
removeCharacterPromptSuffix,
removePropPromptSuffix,
} from '@/lib/constants'
// Counts non-overlapping occurrences of `target` inside `input`.
// An empty target is defined to occur zero times.
function countOccurrences(input: string, target: string) {
  if (!target) return 0
  let total = 0
  let cursor = input.indexOf(target)
  while (cursor !== -1) {
    total += 1
    cursor = input.indexOf(target, cursor + target.length)
  }
  return total
}
// Regression: character/prop prompt suffixes are appended exactly once and
// removed cleanly, leaving the base prompt text intact.
describe('character prompt suffix regression', () => {
  it('appends suffix when generating prompt', () => {
    const basePrompt = 'A brave knight in silver armor'
    const generated = addCharacterPromptSuffix(basePrompt)
    expect(generated).toContain(CHARACTER_PROMPT_SUFFIX)
    // Exactly one occurrence guards against double-appending.
    expect(countOccurrences(generated, CHARACTER_PROMPT_SUFFIX)).toBe(1)
  })
  it('removes suffix text from prompt', () => {
    const basePrompt = 'A calm detective with short black hair'
    const withSuffix = addCharacterPromptSuffix(basePrompt)
    const removed = removeCharacterPromptSuffix(withSuffix)
    expect(removed).not.toContain(CHARACTER_PROMPT_SUFFIX)
    expect(removed).toContain(basePrompt)
  })
  it('uses suffix as full prompt when base prompt is empty', () => {
    expect(addCharacterPromptSuffix('')).toBe(CHARACTER_PROMPT_SUFFIX)
    expect(removeCharacterPromptSuffix('')).toBe('')
  })
  it('appends the prop suffix exactly once', () => {
    const basePrompt = '银质餐具套装,包含刀叉与汤匙,金属光泽冷白'
    const generated = addPropPromptSuffix(basePrompt)
    expect(generated).toContain(PROP_PROMPT_SUFFIX)
    expect(countOccurrences(generated, PROP_PROMPT_SUFFIX)).toBe(1)
  })
  it('removes the prop suffix from prompts', () => {
    const basePrompt = '黑铁长棍,两端包裹金色金属箍'
    const withSuffix = addPropPromptSuffix(basePrompt)
    const removed = removePropPromptSuffix(withSuffix)
    expect(removed).not.toContain(PROP_PROMPT_SUFFIX)
    expect(removed).toContain(basePrompt)
  })
})
@@ -0,0 +1,22 @@
import { describe, expect, it } from 'vitest'
import { isBillableTaskType } from '@/lib/billing/task-policy'
import { getLLMTaskPolicy } from '@/lib/llm-observe/task-policy'
import { getTaskTypeLabel } from '@/lib/task/progress-message'
import { resolveTaskIntent } from '@/lib/task/intent'
import { TASK_TYPE } from '@/lib/task/types'
// Both prop-modify task types must be registered consistently across every
// task metadata helper: intent resolution, i18n label, billing, LLM console.
describe('prop modify task registration', () => {
  it('registers project prop modify tasks across task metadata helpers', () => {
    expect(resolveTaskIntent(TASK_TYPE.AI_MODIFY_PROP)).toBe('modify')
    expect(getTaskTypeLabel(TASK_TYPE.AI_MODIFY_PROP)).toBe('progress.taskType.aiModifyProp')
    expect(isBillableTaskType(TASK_TYPE.AI_MODIFY_PROP)).toBe(true)
    expect(getLLMTaskPolicy(TASK_TYPE.AI_MODIFY_PROP).consoleEnabled).toBe(true)
  })
  it('registers asset-hub prop modify tasks across task metadata helpers', () => {
    expect(resolveTaskIntent(TASK_TYPE.ASSET_HUB_AI_MODIFY_PROP)).toBe('modify')
    expect(getTaskTypeLabel(TASK_TYPE.ASSET_HUB_AI_MODIFY_PROP)).toBe('progress.taskType.assetHubAiModifyProp')
    expect(isBillableTaskType(TASK_TYPE.ASSET_HUB_AI_MODIFY_PROP)).toBe(true)
    expect(getLLMTaskPolicy(TASK_TYPE.ASSET_HUB_AI_MODIFY_PROP).consoleEnabled).toBe(true)
  })
})
@@ -0,0 +1,29 @@
import { describe, expect, it } from 'vitest'
import { readApiErrorMessage } from '@/lib/api/read-error-message'
// Wraps a payload in a JSON Response with an application/json content type.
// Status defaults to 400 because these tests exercise error paths.
function buildJsonResponse(body: unknown, status = 400): Response {
  const serialized = JSON.stringify(body)
  const init = {
    status,
    headers: { 'Content-Type': 'application/json' },
  }
  return new Response(serialized, init)
}
// readApiErrorMessage should prefer the nested error.message over the
// top-level message field, and fall back to the caller-supplied default when
// the response body is not JSON.
describe('readApiErrorMessage', () => {
  it('returns nested api error message instead of [object Object]', async () => {
    const response = buildJsonResponse({
      error: {
        code: 'INVALID_PARAMS',
        message: 'Episode name is required',
      },
      message: 'Invalid parameters',
    })
    await expect(readApiErrorMessage(response, '创建失败')).resolves.toBe('Episode name is required')
  })
  it('falls back when the response body is not json', async () => {
    const response = new Response('bad gateway', { status: 502 })
    await expect(readApiErrorMessage(response, '创建失败')).resolves.toBe('创建失败')
  })
})
@@ -0,0 +1,278 @@
import { afterEach, describe, expect, it, vi } from 'vitest'
import { subscribeRecoveredRun } from '@/lib/query/hooks/run-stream/recovered-run-subscription'
// Minimal fetch-response stub: exposes `ok` (derived from the status code)
// and a `json()` that resolves with the supplied payload.
function jsonResponse(payload: unknown, status = 200) {
  const ok = status >= 200 && status < 300
  return {
    ok,
    json: async () => payload,
  }
}
// Polls `condition` every 10ms until it returns true or `timeoutMs` elapses.
// Resolves on success; rejects with 'condition not met before timeout'.
// Fix: a final check runs after the loop, so a condition that becomes true
// during the last sleep interval is observed instead of spuriously timing out.
async function waitForCondition(condition: () => boolean, timeoutMs = 1000) {
  const startedAt = Date.now()
  while (Date.now() - startedAt < timeoutMs) {
    if (condition()) return
    await new Promise((resolve) => setTimeout(resolve, 10))
  }
  // Last-chance check after the deadline passes.
  if (condition()) return
  throw new Error('condition not met before timeout')
}
// subscribeRecoveredRun: after a page refresh it must replay persisted run
// events from /api/runs/:id/events, settle on terminal events, enforce an
// idle timeout (reset by new events), and reconcile terminal run status via
// /api/runs/:id when polling stays empty.
describe('recovered run subscription', () => {
  const originalFetch = globalThis.fetch
  afterEach(() => {
    vi.restoreAllMocks()
    vi.unstubAllGlobals()
    vi.useRealTimers()
    // Remove the fetch key entirely when there was no original binding.
    if (originalFetch) {
      globalThis.fetch = originalFetch
    } else {
      Reflect.deleteProperty(globalThis, 'fetch')
    }
  })
  it('replays run events and keeps recovering when no terminal event is present', async () => {
    const fetchMock = vi.fn().mockResolvedValue(
      jsonResponse({
        events: [
          {
            seq: 1,
            eventType: 'step.start',
            stepKey: 'clip_1_phase1',
            attempt: 1,
            payload: {
              stepTitle: '分镜规划',
              stepIndex: 1,
              stepTotal: 4,
              message: 'running',
            },
            createdAt: '2026-02-28T00:00:01.000Z',
          },
        ],
      }),
    )
    globalThis.fetch = fetchMock as unknown as typeof fetch
    const applyAndCapture = vi.fn()
    const onSettled = vi.fn()
    const cleanup = subscribeRecoveredRun({
      runId: 'run-1',
      taskStreamTimeoutMs: 10_000,
      applyAndCapture,
      onSettled,
    })
    await waitForCondition(() => fetchMock.mock.calls.length > 0 && applyAndCapture.mock.calls.length > 0)
    expect(fetchMock).toHaveBeenCalledWith(
      '/api/runs/run-1/events?afterSeq=0&limit=500',
      expect.objectContaining({ method: 'GET', cache: 'no-store' }),
    )
    expect(applyAndCapture).toHaveBeenCalledWith(expect.objectContaining({
      event: 'step.start',
      runId: 'run-1',
      stepId: 'clip_1_phase1',
    }))
    // No terminal event yet, so the subscription must stay unsettled.
    expect(onSettled).not.toHaveBeenCalled()
    cleanup()
  })
  it('settles recovery when replay hits terminal run event', async () => {
    const fetchMock = vi.fn().mockResolvedValue(
      jsonResponse({
        events: [
          {
            seq: 1,
            eventType: 'run.error',
            payload: {
              message: 'exception TypeError: fetch failed sending request',
            },
            createdAt: '2026-02-28T00:00:02.000Z',
          },
        ],
      }),
    )
    globalThis.fetch = fetchMock as unknown as typeof fetch
    const applyAndCapture = vi.fn()
    const onSettled = vi.fn()
    subscribeRecoveredRun({
      runId: 'run-1',
      taskStreamTimeoutMs: 10_000,
      applyAndCapture,
      onSettled,
    })
    await waitForCondition(() => onSettled.mock.calls.length === 1 && applyAndCapture.mock.calls.length > 0)
    expect(onSettled).toHaveBeenCalledTimes(1)
    expect(applyAndCapture).toHaveBeenCalledWith(expect.objectContaining({
      event: 'run.error',
      runId: 'run-1',
    }))
  })
  it('replays step.chunk output so refresh keeps prior text', async () => {
    const fetchMock = vi.fn().mockResolvedValue(
      jsonResponse({
        events: [
          {
            seq: 1,
            eventType: 'step.chunk',
            stepKey: 'clip_1_phase1',
            payload: {
              stream: {
                kind: 'text',
                lane: 'main',
                seq: 1,
                delta: '旧输出',
              },
            },
            createdAt: '2026-02-28T00:00:03.000Z',
          },
        ],
      }),
    )
    globalThis.fetch = fetchMock as unknown as typeof fetch
    const applyAndCapture = vi.fn()
    const onSettled = vi.fn()
    const cleanup = subscribeRecoveredRun({
      runId: 'run-1',
      taskStreamTimeoutMs: 10_000,
      applyAndCapture,
      onSettled,
    })
    await waitForCondition(() => applyAndCapture.mock.calls.some((call) => call[0]?.event === 'step.chunk'))
    // The replayed chunk must surface the original streamed delta text.
    expect(applyAndCapture).toHaveBeenCalledWith(expect.objectContaining({
      event: 'step.chunk',
      runId: 'run-1',
      stepId: 'clip_1_phase1',
      textDelta: '旧输出',
    }))
    cleanup()
  })
  it('emits run.error and settles when idle timeout is reached', async () => {
    vi.useFakeTimers()
    const fetchMock = vi.fn().mockResolvedValue(
      jsonResponse({
        events: [],
      }),
    )
    globalThis.fetch = fetchMock as unknown as typeof fetch
    const applyAndCapture = vi.fn()
    const onSettled = vi.fn()
    subscribeRecoveredRun({
      runId: 'run-timeout',
      taskStreamTimeoutMs: 3_000,
      applyAndCapture,
      onSettled,
    })
    // Advance just past the 3s idle timeout to trigger the synthetic error.
    await vi.advanceTimersByTimeAsync(3_200)
    expect(onSettled).toHaveBeenCalledTimes(1)
    expect(applyAndCapture).toHaveBeenCalledWith(expect.objectContaining({
      event: 'run.error',
      runId: 'run-timeout',
      message: 'run stream timeout: run-timeout',
    }))
    vi.useRealTimers()
  })
  it('resets idle timeout when a new event arrives during recovery', async () => {
    vi.useFakeTimers()
    let eventFetchCount = 0
    const fetchMock = vi.fn().mockImplementation(async () => {
      // Only the second poll returns an event; every other poll is empty.
      eventFetchCount += 1
      if (eventFetchCount === 2) {
        return jsonResponse({
          events: [
            {
              seq: 1,
              eventType: 'run.start',
              payload: { message: 'resumed' },
              createdAt: '2026-02-28T00:00:01.500Z',
            },
          ],
        })
      }
      return jsonResponse({ events: [] })
    })
    globalThis.fetch = fetchMock as unknown as typeof fetch
    const applyAndCapture = vi.fn()
    const onSettled = vi.fn()
    subscribeRecoveredRun({
      runId: 'run-recover',
      taskStreamTimeoutMs: 3_000,
      applyAndCapture,
      onSettled,
    })
    // The mid-recovery event must push the idle deadline past 3.2s…
    await vi.advanceTimersByTimeAsync(3_200)
    expect(onSettled).not.toHaveBeenCalled()
    // …and only a further quiet period lets the timeout settle it.
    await vi.advanceTimersByTimeAsync(2_000)
    expect(onSettled).toHaveBeenCalledTimes(1)
    expect(applyAndCapture).toHaveBeenCalledWith(expect.objectContaining({
      event: 'run.start',
      runId: 'run-recover',
    }))
    vi.useRealTimers()
  })
  it('reconciles run snapshot to failed when event polling stays empty', async () => {
    vi.useFakeTimers()
    const fetchMock = vi.fn().mockImplementation(async (input: RequestInfo | URL) => {
      const url = String(input)
      if (url.includes('/api/runs/run-reconcile/events')) {
        return jsonResponse({ events: [] })
      }
      if (url === '/api/runs/run-reconcile') {
        // Snapshot endpoint reports a terminal failed run.
        return jsonResponse({
          run: {
            id: 'run-reconcile',
            status: 'failed',
            errorMessage: 'Ark Responses 调用失败',
          },
        })
      }
      return jsonResponse({ events: [] })
    })
    globalThis.fetch = fetchMock as unknown as typeof fetch
    const applyAndCapture = vi.fn()
    const onSettled = vi.fn()
    subscribeRecoveredRun({
      runId: 'run-reconcile',
      taskStreamTimeoutMs: 20_000,
      applyAndCapture,
      onSettled,
    })
    await vi.advanceTimersByTimeAsync(3_500)
    expect(onSettled).toHaveBeenCalledTimes(1)
    expect(fetchMock).toHaveBeenCalledWith(
      '/api/runs/run-reconcile',
      expect.objectContaining({ method: 'GET', cache: 'no-store' }),
    )
    expect(applyAndCapture).toHaveBeenCalledWith(expect.objectContaining({
      event: 'run.error',
      runId: 'run-reconcile',
      message: 'Ark Responses 调用失败',
    }))
    vi.useRealTimers()
  })
})
+49
View File
@@ -0,0 +1,49 @@
import { afterEach, describe, expect, it, vi } from 'vitest'
import {
recoveryProbeTestUtils,
startRecoveryProbe,
} from '@/lib/query/hooks/run-stream/recovery-probe'
// Recovery probe: when the first probe finds no active run, it must retry on
// the configured interval and report a later-found run id exactly once.
describe('recovery probe', () => {
  afterEach(() => {
    vi.useRealTimers()
    // Reset recorded successful probe scopes so later tests re-probe.
    recoveryProbeTestUtils.clearSuccessfulProbeScopes()
  })
  it('retries active run recovery when the first probe misses and a later probe finds a run', async () => {
    vi.useFakeTimers()
    // First probe resolves null (miss), second resolves an active run id.
    const resolveActiveRunId = vi
      .fn<({ projectId, storageScopeKey }: { projectId: string; storageScopeKey?: string }) => Promise<string | null>>()
      .mockResolvedValueOnce(null)
      .mockResolvedValueOnce('run-2')
    const onRecovered = vi.fn()
    const cleanup = startRecoveryProbe({
      projectId: 'project-1',
      storageKey: 'scope:story-to-script:episode-1',
      storageScopeKey: 'episode-1',
      hasRunState: () => false,
      resolveActiveRunId,
      onRecovered,
    })
    // Flush the immediate first probe (no simulated time elapses).
    await vi.advanceTimersByTimeAsync(0)
    expect(resolveActiveRunId).toHaveBeenCalledTimes(1)
    expect(resolveActiveRunId).toHaveBeenLastCalledWith({
      projectId: 'project-1',
      storageScopeKey: 'episode-1',
    })
    expect(onRecovered).not.toHaveBeenCalled()
    await vi.advanceTimersByTimeAsync(
      recoveryProbeTestUtils.PROBE_RETRY_INTERVAL_MS,
    )
    expect(resolveActiveRunId).toHaveBeenCalledTimes(2)
    expect(onRecovered).toHaveBeenCalledTimes(1)
    expect(onRecovered).toHaveBeenCalledWith('run-2')
    cleanup()
  })
})
@@ -0,0 +1,54 @@
import { describe, expect, it } from 'vitest'
import { parseReferenceImages, readBoolean, readString } from '@/lib/workers/handlers/reference-to-character-helpers'
// reference-to-character worker payload helpers: parseReferenceImages
// (trimming, cap of 5, empty filtering), readString, readBoolean.
describe('reference-to-character helpers', () => {
  it('parses and trims single reference image', () => {
    expect(parseReferenceImages({ referenceImageUrl: ' https://x/a.png ' })).toEqual(['https://x/a.png'])
  })
  it('parses multi reference images and truncates to max 5', () => {
    expect(
      parseReferenceImages({
        referenceImageUrls: [
          'https://x/1.png',
          'https://x/2.png',
          'https://x/3.png',
          'https://x/4.png',
          'https://x/5.png',
          'https://x/6.png',
        ],
      }),
    ).toEqual([
      'https://x/1.png',
      'https://x/2.png',
      'https://x/3.png',
      'https://x/4.png',
      'https://x/5.png',
    ])
  })
  it('filters empty values', () => {
    // Whitespace-only entries are dropped entirely.
    expect(
      parseReferenceImages({
        referenceImageUrls: [' ', '\n', 'https://x/ok.png'],
      }),
    ).toEqual(['https://x/ok.png'])
  })
  it('readString trims and normalizes invalid values', () => {
    expect(readString(' abc ')).toBe('abc')
    expect(readString(1)).toBe('')
    expect(readString(null)).toBe('')
  })
  it('readBoolean supports boolean/number/string flags', () => {
    expect(readBoolean(true)).toBe(true)
    expect(readBoolean(1)).toBe(true)
    expect(readBoolean('true')).toBe(true)
    expect(readBoolean('YES')).toBe(true)
    expect(readBoolean('on')).toBe(true)
    expect(readBoolean('0')).toBe(false)
    expect(readBoolean(false)).toBe(false)
    expect(readBoolean(0)).toBe(false)
  })
})
@@ -0,0 +1,56 @@
import { describe, expect, it } from 'vitest'
import { NextRequest } from 'next/server'
import {
parseSyncFlag,
resolveDisplayMode,
resolvePositiveInteger,
shouldRunSyncTask,
} from '@/lib/llm-observe/route-task'
// Builds a POST NextRequest against http://localhost for route-task tests.
function buildRequest(path: string, headers?: Record<string, string>) {
  const target = new URL(path, 'http://localhost')
  const init = {
    method: 'POST',
    headers: headers || {},
  }
  return new NextRequest(target, init)
}
// route-task helpers: boolean-like sync-flag parsing, sync-run decision from
// header/body/query, display-mode validation, positive-integer coercion.
describe('route-task helpers', () => {
  it('parseSyncFlag supports boolean-like values', () => {
    expect(parseSyncFlag(true)).toBe(true)
    expect(parseSyncFlag(1)).toBe(true)
    expect(parseSyncFlag('1')).toBe(true)
    expect(parseSyncFlag('true')).toBe(true)
    expect(parseSyncFlag('yes')).toBe(true)
    expect(parseSyncFlag('on')).toBe(true)
    expect(parseSyncFlag('false')).toBe(false)
    expect(parseSyncFlag(0)).toBe(false)
  })
  it('shouldRunSyncTask true when internal task header exists', () => {
    const req = buildRequest('/api/test', { 'x-internal-task-id': 'task-1' })
    expect(shouldRunSyncTask(req, {})).toBe(true)
  })
  it('shouldRunSyncTask true when body sync flag exists', () => {
    const req = buildRequest('/api/test')
    expect(shouldRunSyncTask(req, { sync: 'true' })).toBe(true)
  })
  it('shouldRunSyncTask true when query sync flag exists', () => {
    const req = buildRequest('/api/test?sync=1')
    expect(shouldRunSyncTask(req, {})).toBe(true)
  })
  it('resolveDisplayMode falls back to default on invalid value', () => {
    expect(resolveDisplayMode('detail', 'loading')).toBe('detail')
    expect(resolveDisplayMode('loading', 'detail')).toBe('loading')
    expect(resolveDisplayMode('invalid', 'loading')).toBe('loading')
  })
  it('resolvePositiveInteger returns safe integer fallback', () => {
    // 2.9 truncates to 2; non-numeric or non-positive inputs use the fallback.
    expect(resolvePositiveInteger(2.9, 1)).toBe(2)
    expect(resolvePositiveInteger('9', 1)).toBe(9)
    expect(resolvePositiveInteger('0', 7)).toBe(7)
    expect(resolvePositiveInteger('abc', 7)).toBe(7)
  })
})
@@ -0,0 +1,278 @@
import { describe, expect, it, vi } from 'vitest'
import { executeRunRequest } from '@/lib/query/hooks/run-stream/run-request-executor'
import type { RunStreamEvent } from '@/lib/novel-promotion/run-stream/types'
// Builds a real Response carrying the JSON-serialized payload with a
// lowercase content-type header; status defaults to 200.
function jsonResponse(payload: unknown, status = 200) {
  const body = JSON.stringify(payload)
  const init = {
    status,
    headers: {
      'content-type': 'application/json',
    },
  }
  return new Response(body, init)
}
// executeRunRequest's /api/runs/:id/events polling path: happy-path replay to
// completion, surfacing HTTP errors from the events endpoint, idle-timeout
// behaviour with resets, and terminal-status reconciliation via /api/runs/:id.
describe('run-request-executor run events path', () => {
  it('uses /api/runs/:runId/events when async response includes runId', async () => {
    const fetchMock = vi.fn<typeof fetch>()
    // First call: the submit endpoint acknowledges an async run.
    // Second call: the events endpoint replays a full run to completion.
    fetchMock
      .mockResolvedValueOnce(jsonResponse({
        success: true,
        async: true,
        taskId: 'task_1',
        runId: 'run_1',
      }))
      .mockResolvedValueOnce(jsonResponse({
        runId: 'run_1',
        afterSeq: 0,
        events: [
          {
            seq: 1,
            eventType: 'run.start',
            payload: { message: 'started' },
            createdAt: '2026-02-28T00:00:00.000Z',
          },
          {
            seq: 2,
            eventType: 'step.start',
            stepKey: 'step_a',
            attempt: 1,
            payload: {
              stepTitle: 'Step A',
              stepIndex: 1,
              stepTotal: 1,
            },
            createdAt: '2026-02-28T00:00:01.000Z',
          },
          {
            seq: 3,
            eventType: 'step.chunk',
            stepKey: 'step_a',
            attempt: 1,
            lane: 'text',
            payload: {
              stream: {
                delta: 'hello',
                seq: 1,
              },
            },
            createdAt: '2026-02-28T00:00:01.100Z',
          },
          {
            seq: 4,
            eventType: 'step.complete',
            stepKey: 'step_a',
            attempt: 1,
            payload: {
              text: 'hello',
            },
            createdAt: '2026-02-28T00:00:02.000Z',
          },
          {
            seq: 5,
            eventType: 'run.complete',
            payload: {
              summary: { ok: true },
            },
            createdAt: '2026-02-28T00:00:03.000Z',
          },
        ],
      }))
    const originalFetch = globalThis.fetch
    globalThis.fetch = fetchMock
    try {
      const captured: RunStreamEvent[] = []
      const controller = new AbortController()
      const result = await executeRunRequest({
        endpointUrl: '/api/novel-promotion/project_1/story-to-script-stream',
        requestBody: { episodeId: 'episode_1' },
        controller,
        taskStreamTimeoutMs: 30_000,
        applyAndCapture: (event) => {
          captured.push(event)
        },
        finalResultRef: { current: null },
      })
      expect(result.status).toBe('completed')
      expect(result.runId).toBe('run_1')
      expect(captured.some((event) => event.event === 'step.chunk' && event.textDelta === 'hello')).toBe(true)
      expect(fetchMock.mock.calls[1]?.[0]).toBe('/api/runs/run_1/events?afterSeq=0&limit=500')
    } finally {
      globalThis.fetch = originalFetch
    }
  })
  it('surfaces run-events fetch errors instead of swallowing them', async () => {
    const fetchMock = vi.fn<typeof fetch>()
    fetchMock
      .mockResolvedValueOnce(jsonResponse({
        success: true,
        async: true,
        taskId: 'task_1',
        runId: 'run_1',
      }))
      .mockResolvedValueOnce(jsonResponse({
        error: {
          message: 'events backend unavailable',
        },
      }, 503))
    const originalFetch = globalThis.fetch
    globalThis.fetch = fetchMock
    try {
      const controller = new AbortController()
      // The 503 from the events endpoint must propagate as a thrown error.
      await expect(executeRunRequest({
        endpointUrl: '/api/novel-promotion/project_1/story-to-script-stream',
        requestBody: { episodeId: 'episode_1' },
        controller,
        taskStreamTimeoutMs: 30_000,
        applyAndCapture: () => undefined,
        finalResultRef: { current: null },
      })).rejects.toThrow('run events fetch failed (HTTP 503): events backend unavailable')
    } finally {
      globalThis.fetch = originalFetch
    }
  })
  it('uses idle timeout and resets the timer when new events arrive', async () => {
    vi.useFakeTimers()
    const fetchMock = vi.fn<typeof fetch>()
    let eventsRequestCount = 0
    fetchMock.mockImplementation(async (input: RequestInfo | URL) => {
      const url = String(input)
      if (url.includes('/story-to-script-stream')) {
        return jsonResponse({
          success: true,
          async: true,
          taskId: 'task_1',
          runId: 'run_1',
        })
      }
      if (url === '/api/runs/run_1') {
        // Snapshot endpoint keeps reporting 'running', so no reconciliation.
        return jsonResponse({
          run: {
            id: 'run_1',
            status: 'running',
          },
        })
      }
      if (!url.includes('/api/runs/run_1/events')) {
        return jsonResponse({ events: [] })
      }
      // Only the third events poll delivers an event; others stay empty.
      eventsRequestCount += 1
      if (eventsRequestCount === 3) {
        return jsonResponse({
          events: [
            {
              seq: 1,
              eventType: 'run.start',
              payload: { message: 'started' },
              createdAt: '2026-02-28T00:00:03.000Z',
            },
          ],
        })
      }
      return jsonResponse({ events: [] })
    })
    const originalFetch = globalThis.fetch
    globalThis.fetch = fetchMock
    try {
      const controller = new AbortController()
      let settled = false
      const request = executeRunRequest({
        endpointUrl: '/api/novel-promotion/project_1/story-to-script-stream',
        requestBody: { episodeId: 'episode_1' },
        controller,
        taskStreamTimeoutMs: 3_000,
        applyAndCapture: () => undefined,
        finalResultRef: { current: null },
      }).finally(() => {
        settled = true
      })
      // The run.start from the third poll resets the idle timer, so the
      // request is still pending at 5s…
      await vi.advanceTimersByTimeAsync(5_000)
      expect(settled).toBe(false)
      // …and only a further 3s of silence makes it time out as failed.
      await vi.advanceTimersByTimeAsync(3_000)
      await expect(request).resolves.toEqual(expect.objectContaining({
        runId: 'run_1',
        status: 'failed',
        errorMessage: 'run stream timeout: run_1',
      }))
    } finally {
      vi.useRealTimers()
      globalThis.fetch = originalFetch
    }
  })
  it('reconciles terminal failed run status when events stream has no new rows', async () => {
    vi.useFakeTimers()
    const fetchMock = vi.fn<typeof fetch>()
    fetchMock.mockImplementation(async (input: RequestInfo | URL) => {
      const url = String(input)
      if (url.includes('/story-to-script-stream')) {
        return jsonResponse({
          success: true,
          async: true,
          taskId: 'task_2',
          runId: 'run_2',
        })
      }
      if (url.includes('/api/runs/run_2/events')) {
        return jsonResponse({ events: [] })
      }
      if (url === '/api/runs/run_2') {
        // Snapshot endpoint reports the run already terminally failed.
        return jsonResponse({
          run: {
            id: 'run_2',
            status: 'failed',
            errorMessage: 'Ark Responses 调用失败',
          },
        })
      }
      return jsonResponse({ events: [] })
    })
    const originalFetch = globalThis.fetch
    globalThis.fetch = fetchMock
    try {
      const captured: RunStreamEvent[] = []
      const controller = new AbortController()
      const request = executeRunRequest({
        endpointUrl: '/api/novel-promotion/project_1/story-to-script-stream',
        requestBody: { episodeId: 'episode_1' },
        controller,
        taskStreamTimeoutMs: 30_000,
        applyAndCapture: (event) => {
          captured.push(event)
        },
        finalResultRef: { current: null },
      })
      await vi.advanceTimersByTimeAsync(3_500)
      await expect(request).resolves.toEqual(expect.objectContaining({
        runId: 'run_2',
        status: 'failed',
        errorMessage: 'Ark Responses 调用失败',
      }))
      expect(fetchMock).toHaveBeenCalledWith(
        '/api/runs/run_2',
        expect.objectContaining({ method: 'GET', cache: 'no-store' }),
      )
      expect(captured.some((event) => event.event === 'run.error' && event.message === 'Ark Responses 调用失败')).toBe(true)
    } finally {
      vi.useRealTimers()
      globalThis.fetch = originalFetch
    }
  })
})
@@ -0,0 +1,370 @@
import { describe, expect, it } from 'vitest'
import type { RunStreamEvent } from '@/lib/novel-promotion/run-stream/types'
import { applyRunStreamEvent, getStageOutput } from '@/lib/query/hooks/run-stream/state-machine'
/**
 * Folds an ordered list of run-stream events into a final run state,
 * starting from a null (empty) state.
 */
function applySequence(events: RunStreamEvent[]) {
  return events.reduce(
    (state: ReturnType<typeof applyRunStreamEvent> | null, event) => applyRunStreamEvent(state, event),
    null,
  )
}
// Reducer-level tests for the run-stream state machine: every case builds a
// state by folding an ordered event list through applyRunStreamEvent (via
// applySequence above) and then asserts on the derived RunState.
describe('run stream state-machine', () => {
  // run.error must cascade a failed status onto steps that never finished,
  // while already-completed steps keep their status.
  it('marks unfinished steps as failed when run.error arrives', () => {
    const runId = 'run-1'
    const state = applySequence([
      { runId, event: 'run.start', ts: '2026-02-26T23:00:00.000Z', status: 'running' },
      {
        runId,
        event: 'step.start',
        ts: '2026-02-26T23:00:01.000Z',
        status: 'running',
        stepId: 'step-a',
        stepTitle: 'A',
        stepIndex: 1,
        stepTotal: 2,
      },
      {
        runId,
        event: 'step.complete',
        ts: '2026-02-26T23:00:02.000Z',
        status: 'completed',
        stepId: 'step-b',
        stepTitle: 'B',
        stepIndex: 2,
        stepTotal: 2,
        text: 'ok',
      },
      {
        runId,
        event: 'run.error',
        ts: '2026-02-26T23:00:03.000Z',
        status: 'failed',
        message: 'exception TypeError: fetch failed sending request',
      },
    ])
    expect(state?.status).toBe('failed')
    // step-a was still running when the run failed, so it inherits the
    // run-level error message.
    expect(state?.stepsById['step-a']?.status).toBe('failed')
    expect(state?.stepsById['step-a']?.errorMessage).toContain('fetch failed')
    expect(state?.stepsById['step-b']?.status).toBe('completed')
  })
  // A failed step with no streamed text must still render a readable error
  // section rather than an empty output pane.
  it('returns readable error output for failed step without stream text', () => {
    const output = getStageOutput({
      id: 'step-failed',
      attempt: 1,
      title: 'failed',
      stepIndex: 1,
      stepTotal: 1,
      status: 'failed',
      dependsOn: [],
      blockedBy: [],
      groupId: null,
      parallelKey: null,
      retryable: true,
      textOutput: '',
      reasoningOutput: '',
      textLength: 0,
      reasoningLength: 0,
      message: '',
      errorMessage: 'exception TypeError: fetch failed sending request',
      updatedAt: Date.now(),
      seqByLane: {
        text: 0,
        reasoning: 0,
      },
    })
    expect(output).toContain('【错误】')
    expect(output).toContain('fetch failed sending request')
  })
  // Retry step ids (here with an `_r2` suffix) must fold into the original
  // step — attempt counter bumped, output replaced — instead of appending a
  // duplicate stage entry.
  it('merges retry attempts into one step instead of duplicating stage entries', () => {
    const runId = 'run-2'
    const state = applySequence([
      { runId, event: 'run.start', ts: '2026-02-26T23:00:00.000Z', status: 'running' },
      {
        runId,
        event: 'step.start',
        ts: '2026-02-26T23:00:01.000Z',
        status: 'running',
        stepId: 'clip_x_phase1',
        stepTitle: 'A',
        stepIndex: 1,
        stepTotal: 1,
      },
      {
        runId,
        event: 'step.chunk',
        ts: '2026-02-26T23:00:01.100Z',
        status: 'running',
        stepId: 'clip_x_phase1',
        lane: 'text',
        seq: 1,
        textDelta: 'first-attempt',
      },
      {
        runId,
        event: 'step.start',
        ts: '2026-02-26T23:00:02.000Z',
        status: 'running',
        stepId: 'clip_x_phase1_r2',
        stepTitle: 'A',
        stepIndex: 1,
        stepTotal: 1,
      },
      {
        runId,
        event: 'step.chunk',
        ts: '2026-02-26T23:00:02.100Z',
        status: 'running',
        stepId: 'clip_x_phase1_r2',
        lane: 'text',
        seq: 1,
        textDelta: 'retry-output',
      },
    ])
    expect(state?.stepOrder).toEqual(['clip_x_phase1'])
    expect(state?.stepsById['clip_x_phase1']?.attempt).toBe(2)
    expect(state?.stepsById['clip_x_phase1']?.textOutput).toBe('retry-output')
  })
  // Explicit stepAttempt metadata: a higher attempt resets the step's output,
  // and chunks still tagged with the older attempt are dropped as stale.
  it('resets step output when a higher stepAttempt starts and ignores stale lower attempt chunks', () => {
    const runId = 'run-3'
    const state = applySequence([
      { runId, event: 'run.start', ts: '2026-02-26T23:00:00.000Z', status: 'running' },
      {
        runId,
        event: 'step.start',
        ts: '2026-02-26T23:00:01.000Z',
        status: 'running',
        stepId: 'clip_y_phase1',
        stepAttempt: 1,
        stepTitle: 'A',
        stepIndex: 1,
        stepTotal: 1,
      },
      {
        runId,
        event: 'step.chunk',
        ts: '2026-02-26T23:00:01.100Z',
        status: 'running',
        stepId: 'clip_y_phase1',
        stepAttempt: 1,
        lane: 'text',
        seq: 1,
        textDelta: 'old-output',
      },
      {
        runId,
        event: 'step.start',
        ts: '2026-02-26T23:00:02.000Z',
        status: 'running',
        stepId: 'clip_y_phase1',
        stepAttempt: 2,
        stepTitle: 'A',
        stepIndex: 1,
        stepTotal: 1,
      },
      {
        // Stale: attempt 1 chunk arriving after attempt 2 has started.
        runId,
        event: 'step.chunk',
        ts: '2026-02-26T23:00:02.100Z',
        status: 'running',
        stepId: 'clip_y_phase1',
        stepAttempt: 1,
        lane: 'text',
        seq: 2,
        textDelta: 'should-be-ignored',
      },
      {
        runId,
        event: 'step.chunk',
        ts: '2026-02-26T23:00:02.200Z',
        status: 'running',
        stepId: 'clip_y_phase1',
        stepAttempt: 2,
        lane: 'text',
        seq: 1,
        textDelta: 'new-output',
      },
    ])
    expect(state?.stepsById['clip_y_phase1']?.attempt).toBe(2)
    expect(state?.stepsById['clip_y_phase1']?.textOutput).toBe('new-output')
  })
  // A chunk arriving after step.complete must reopen the step so no tail
  // output is lost; run.complete then finalizes everything back to completed.
  it('reopens completed step when late chunk arrives, then finalizes on run.complete', () => {
    const runId = 'run-4'
    const state = applySequence([
      { runId, event: 'run.start', ts: '2026-02-26T23:00:00.000Z', status: 'running' },
      {
        runId,
        event: 'step.start',
        ts: '2026-02-26T23:00:01.000Z',
        status: 'running',
        stepId: 'analyze_characters',
        stepTitle: 'characters',
        stepIndex: 1,
        stepTotal: 2,
      },
      {
        runId,
        event: 'step.complete',
        ts: '2026-02-26T23:00:02.000Z',
        status: 'completed',
        stepId: 'analyze_characters',
        stepTitle: 'characters',
        stepIndex: 1,
        stepTotal: 2,
        text: 'partial',
      },
      {
        // Late chunk after completion.
        runId,
        event: 'step.chunk',
        ts: '2026-02-26T23:00:02.100Z',
        status: 'running',
        stepId: 'analyze_characters',
        lane: 'text',
        seq: 2,
        textDelta: '-tail',
      },
      {
        runId,
        event: 'run.complete',
        ts: '2026-02-26T23:00:03.000Z',
        status: 'completed',
        payload: { ok: true },
      },
    ])
    expect(state?.status).toBe('completed')
    expect(state?.stepsById['analyze_characters']?.status).toBe('completed')
    // The late delta was appended to the completed text.
    expect(state?.stepsById['analyze_characters']?.textOutput).toBe('partial-tail')
  })
  // With no step currently running, the active pointer tracks the most
  // recently seen step.
  it('moves activeStepId to the latest step when no step is running', () => {
    const runId = 'run-5'
    const state = applySequence([
      { runId, event: 'run.start', ts: '2026-02-26T23:00:00.000Z', status: 'running' },
      {
        runId,
        event: 'step.complete',
        ts: '2026-02-26T23:00:01.000Z',
        status: 'completed',
        stepId: 'step-1',
        stepTitle: 'step 1',
        stepIndex: 1,
        stepTotal: 2,
        text: 'a',
      },
      {
        runId,
        event: 'step.complete',
        ts: '2026-02-26T23:00:02.000Z',
        status: 'completed',
        stepId: 'step-2',
        stepTitle: 'step 2',
        stepIndex: 2,
        stepTotal: 2,
        text: 'b',
      },
    ])
    expect(state?.activeStepId).toBe('step-2')
  })
  // blockedBy metadata downgrades a starting step to the blocked status.
  it('marks step as blocked when blockedBy is present', () => {
    const runId = 'run-6'
    const state = applySequence([
      { runId, event: 'run.start', ts: '2026-02-26T23:00:00.000Z', status: 'running' },
      {
        runId,
        event: 'step.start',
        ts: '2026-02-26T23:00:01.000Z',
        status: 'running',
        stepId: 'step-b',
        stepTitle: 'B',
        stepIndex: 2,
        stepTotal: 2,
        blockedBy: ['step-a'],
      },
    ])
    expect(state?.stepsById['step-b']?.status).toBe('blocked')
    expect(state?.stepsById['step-b']?.blockedBy).toEqual(['step-a'])
  })
  // Selection follows the active step automatically when the user has not
  // pinned a specific step.
  it('auto-follows active step when selected step was not manually pinned', () => {
    const runId = 'run-7'
    const state = applySequence([
      { runId, event: 'run.start', ts: '2026-02-26T23:00:00.000Z', status: 'running' },
      {
        runId,
        event: 'step.start',
        ts: '2026-02-26T23:00:01.000Z',
        status: 'running',
        stepId: 'step-1',
        stepTitle: 'step 1',
        stepIndex: 1,
        stepTotal: 2,
      },
      {
        runId,
        event: 'step.complete',
        ts: '2026-02-26T23:00:02.000Z',
        status: 'completed',
        stepId: 'step-1',
        stepTitle: 'step 1',
        stepIndex: 1,
        stepTotal: 2,
      },
      {
        runId,
        event: 'step.start',
        ts: '2026-02-26T23:00:03.000Z',
        status: 'running',
        stepId: 'step-2',
        stepTitle: 'step 2',
        stepIndex: 2,
        stepTotal: 2,
      },
    ])
    expect(state?.activeStepId).toBe('step-2')
    expect(state?.selectedStepId).toBe('step-2')
  })
  // <think>…</think> segments inside a text chunk are rerouted to the
  // reasoning lane; only the remainder counts as visible text output.
  it('moves think-tagged text chunks into reasoning output', () => {
    const runId = 'run-8'
    const state = applySequence([
      { runId, event: 'run.start', ts: '2026-02-26T23:00:00.000Z', status: 'running' },
      {
        runId,
        event: 'step.start',
        ts: '2026-02-26T23:00:01.000Z',
        status: 'running',
        stepId: 'analyze_locations',
        stepTitle: 'locations',
        stepIndex: 2,
        stepTotal: 2,
      },
      {
        runId,
        event: 'step.chunk',
        ts: '2026-02-26T23:00:01.200Z',
        status: 'running',
        stepId: 'analyze_locations',
        lane: 'text',
        seq: 1,
        textDelta: '<think>先分析文本</think>{"locations":[]}',
      },
    ])
    expect(state?.stepsById['analyze_locations']?.reasoningOutput).toBe('先分析文本')
    expect(state?.stepsById['analyze_locations']?.textOutput).toBe('{"locations":[]}')
  })
})
+174
View File
@@ -0,0 +1,174 @@
import { describe, expect, it } from 'vitest'
import { deriveRunStreamView } from '@/lib/query/hooks/run-stream/run-stream-view'
import type { RunState, RunStepState } from '@/lib/query/hooks/run-stream/types'
/**
 * Builds a RunStepState fixture. Defaults describe a single running step with
 * empty output; callers override only the fields relevant to their case.
 */
function buildStep(overrides: Partial<RunStepState> = {}): RunStepState {
  const defaults: RunStepState = {
    id: 'step-1',
    attempt: 1,
    title: 'step',
    stepIndex: 1,
    stepTotal: 1,
    status: 'running',
    dependsOn: [],
    blockedBy: [],
    groupId: null,
    parallelKey: null,
    retryable: true,
    textOutput: '',
    reasoningOutput: '',
    textLength: 0,
    reasoningLength: 0,
    message: '',
    errorMessage: '',
    updatedAt: Date.now(),
    seqByLane: {
      text: 0,
      reasoning: 0,
    },
  }
  return { ...defaults, ...overrides }
}
/**
 * Builds a RunState fixture containing a single default step (see buildStep),
 * which is both the active and the selected step.
 */
function buildRunState(overrides: Partial<RunState> = {}): RunState {
  const step = buildStep()
  const defaults: RunState = {
    runId: 'run-1',
    status: 'running',
    startedAt: Date.now(),
    updatedAt: Date.now(),
    terminalAt: null,
    errorMessage: '',
    summary: null,
    payload: null,
    stepsById: { [step.id]: step },
    stepOrder: [step.id],
    activeStepId: step.id,
    selectedStepId: step.id,
  }
  return { ...defaults, ...overrides }
}
// View-derivation tests: deriveRunStreamView projects a RunState (plus live
// flags and a clock) into what the run console UI should display.
describe('run stream view', () => {
  // A run recovered from persistence can be `running` without a live stream;
  // the console must stay visible in that case.
  it('keeps console visible for recovered running state', () => {
    const state = buildRunState({
      status: 'running',
      terminalAt: null,
    })
    const view = deriveRunStreamView({
      runState: state,
      isLiveRunning: false,
      clock: Date.now(),
    })
    expect(view.isVisible).toBe(true)
  })
  // When the run failed and the selected step has no output, the run-level
  // error message becomes the output text.
  it('shows run error in output when run failed and selected step has no output', () => {
    const state = buildRunState({
      status: 'failed',
      errorMessage: 'exception TypeError: fetch failed sending request',
      stepsById: {
        'step-1': buildStep({ status: 'running' }),
      },
    })
    const view = deriveRunStreamView({
      runState: state,
      isLiveRunning: false,
      clock: Date.now(),
    })
    expect(view.outputText).toContain('【错误】')
    expect(view.outputText).toContain('fetch failed sending request')
  })
  // Same error surfacing when the run dies before any step exists at all.
  it('shows run error in output when run failed before any step starts', () => {
    const state = buildRunState({
      status: 'failed',
      errorMessage: 'NETWORK_ERROR',
      stepsById: {},
      stepOrder: [],
      activeStepId: null,
      selectedStepId: null,
    })
    const view = deriveRunStreamView({
      runState: state,
      isLiveRunning: false,
      clock: Date.now(),
    })
    expect(view.outputText).toBe('【错误】\nNETWORK_ERROR')
  })
  // Failed runs never auto-hide, even long after the terminal timestamp.
  it('keeps failed run visible until user reset', () => {
    const state = buildRunState({
      status: 'failed',
      terminalAt: Date.now() - 60_000,
      errorMessage: 'failed',
    })
    const view = deriveRunStreamView({
      runState: state,
      isLiveRunning: false,
      clock: Date.now(),
    })
    expect(view.isVisible).toBe(true)
  })
  // Completed runs auto-hide once their terminal timestamp is old enough and
  // no live stream is attached.
  it('hides completed run console after stream settles', () => {
    const state = buildRunState({
      status: 'completed',
      terminalAt: Date.now() - 30_000,
    })
    const view = deriveRunStreamView({
      runState: state,
      isLiveRunning: false,
      clock: Date.now(),
    })
    expect(view.isVisible).toBe(false)
  })
  // The headline message must come from the actively running step, even when
  // the user has a different (already completed) step selected.
  it('uses active step message instead of selected completed step message', () => {
    const completedStep = buildStep({
      id: 'step-1',
      title: 'step 1',
      status: 'completed',
      message: 'progress.runtime.llm.completed',
      updatedAt: Date.now() - 1000,
    })
    const runningStep = buildStep({
      id: 'step-2',
      title: 'step 2',
      stepIndex: 2,
      stepTotal: 2,
      status: 'running',
      message: 'progress.runtime.stage.llmStreaming',
      updatedAt: Date.now(),
    })
    const state = buildRunState({
      stepsById: {
        'step-1': completedStep,
        'step-2': runningStep,
      },
      stepOrder: ['step-1', 'step-2'],
      activeStepId: 'step-2',
      selectedStepId: 'step-1',
    })
    const view = deriveRunStreamView({
      runState: state,
      isLiveRunning: false,
      clock: Date.now(),
    })
    expect(view.activeMessage).toBe('progress.runtime.stage.llmStreaming')
  })
})
@@ -0,0 +1,105 @@
import { describe, expect, it } from 'vitest'
import {
asBoolean,
asNonEmptyString,
asObject,
buildIdleState,
pairKey,
resolveTargetState,
toProgress,
} from '@/lib/task/state-service'
// Unit tests for the parsing/derivation helpers backing the task state
// service.
describe('task state service helpers', () => {
  // Exercises each primitive helper: key pairing, strict object/string/bool
  // parsing, and progress clamping into 0-100 (NaN -> null).
  it('normalizes primitive parsing helpers', () => {
    expect(pairKey('A', 'B')).toBe('A:B')
    expect(asObject({ ok: true })).toEqual({ ok: true })
    expect(asObject(['x'])).toBeNull()
    expect(asNonEmptyString(' x ')).toBe('x')
    expect(asNonEmptyString(' ')).toBeNull()
    expect(asBoolean(true)).toBe(true)
    // Only real booleans are accepted; the string 'true' is rejected.
    expect(asBoolean('true')).toBeNull()
    expect(toProgress(101)).toBe(100)
    expect(toProgress(-5)).toBe(0)
    expect(toProgress(Number.NaN)).toBeNull()
  })
  it('builds idle state when no tasks found', () => {
    const idle = buildIdleState({ targetType: 'GlobalCharacter', targetId: 'c1' })
    expect(idle.phase).toBe('idle')
    expect(idle.runningTaskId).toBeNull()
    expect(idle.lastError).toBeNull()
  })
  // An in-flight task surfaces as `processing` with its stage metadata and
  // progress carried through.
  it('resolves processing state from active task', () => {
    const state = resolveTargetState(
      { targetType: 'GlobalCharacter', targetId: 'c1' },
      [
        {
          id: 'task-1',
          type: 'asset_hub_image',
          status: 'processing',
          progress: 42,
          payload: {
            stage: 'image_generating',
            stageLabel: 'Generating',
            ui: { intent: 'create', hasOutputAtStart: false },
          },
          errorCode: null,
          errorMessage: null,
          updatedAt: new Date('2026-02-25T00:00:00.000Z'),
        },
      ],
    )
    expect(state.phase).toBe('processing')
    expect(state.runningTaskId).toBe('task-1')
    expect(state.progress).toBe(42)
    expect(state.stage).toBe('image_generating')
    expect(state.stageLabel).toBe('Generating')
  })
  // A terminal failure carries the error code/message through and clears the
  // running task id.
  it('resolves failed state and normalizes error', () => {
    const state = resolveTargetState(
      { targetType: 'GlobalCharacter', targetId: 'c1' },
      [
        {
          id: 'task-2',
          type: 'asset_hub_image',
          status: 'failed',
          progress: 100,
          payload: { ui: { intent: 'modify', hasOutputAtStart: true } },
          errorCode: 'INVALID_PARAMS',
          errorMessage: 'bad input',
          updatedAt: new Date('2026-02-25T00:00:00.000Z'),
        },
      ],
    )
    expect(state.phase).toBe('failed')
    expect(state.runningTaskId).toBeNull()
    expect(state.lastError?.code).toBe('INVALID_PARAMS')
    expect(state.lastError?.message).toBe('bad input')
  })
  // Cancellation is presented like a failure; the TASK_CANCELLED code is
  // normalized to CONFLICT while the original message is preserved.
  it('treats canceled task as failed presentation state', () => {
    const state = resolveTargetState(
      { targetType: 'GlobalCharacter', targetId: 'c1' },
      [
        {
          id: 'task-3',
          type: 'asset_hub_image',
          status: 'canceled',
          progress: 100,
          payload: { ui: { intent: 'modify', hasOutputAtStart: true } },
          errorCode: 'TASK_CANCELLED',
          errorMessage: 'Task cancelled by user',
          updatedAt: new Date('2026-02-25T00:00:00.000Z'),
        },
      ],
    )
    expect(state.phase).toBe('failed')
    expect(state.lastError?.code).toBe('CONFLICT')
    expect(state.lastError?.message).toBe('Task cancelled by user')
  })
})
@@ -0,0 +1,69 @@
import { describe, expect, it } from 'vitest'
import { TASK_TYPE } from '@/lib/task/types'
import { getTaskFlowMeta } from '@/lib/llm-observe/stage-pipeline'
import { isActiveTaskStatus, normalizeTaskPayload, shouldAttachNewTaskToReusableRun } from '@/lib/task/submitter'
// Tests for the payload-normalization and run-reuse helpers used when
// submitting tasks.
describe('task submitter helpers', () => {
  // Missing flow fields are backfilled from the task type's registered flow
  // metadata, both at the top level and inside `meta`.
  it('fills default flow metadata when payload misses flow fields', () => {
    const type = TASK_TYPE.AI_CREATE_CHARACTER
    const flow = getTaskFlowMeta(type)
    const normalized = normalizeTaskPayload(type, {})
    expect(normalized.flowId).toBe(flow.flowId)
    expect(normalized.flowStageIndex).toBe(flow.flowStageIndex)
    expect(normalized.flowStageTotal).toBe(flow.flowStageTotal)
    expect(normalized.flowStageTitle).toBe(flow.flowStageTitle)
    expect(normalized.meta).toMatchObject({
      flowId: flow.flowId,
      flowStageIndex: flow.flowStageIndex,
      flowStageTotal: flow.flowStageTotal,
      flowStageTitle: flow.flowStageTitle,
    })
  })
  // Out-of-range stage numbers are clamped (index >= 1, total >= index) and
  // titles are trimmed.
  it('normalizes negative stage values', () => {
    const normalized = normalizeTaskPayload(TASK_TYPE.ANALYZE_NOVEL, {
      flowId: 'flow-a',
      flowStageIndex: -9,
      flowStageTotal: -1,
      flowStageTitle: ' title ',
      meta: {},
    })
    expect(normalized.flowId).toBe('flow-a')
    expect(normalized.flowStageIndex).toBeGreaterThanOrEqual(1)
    expect(normalized.flowStageTotal).toBeGreaterThanOrEqual(normalized.flowStageIndex)
    expect(normalized.flowStageTitle).toBe('title')
  })
  // Valid flow values supplied inside payload.meta win over the outer fields.
  it('prefers payload meta flow values when valid', () => {
    const normalized = normalizeTaskPayload(TASK_TYPE.ANALYZE_NOVEL, {
      flowId: 'outer-flow',
      flowStageIndex: 1,
      flowStageTotal: 2,
      flowStageTitle: 'Outer',
      meta: {
        flowId: 'meta-flow',
        flowStageIndex: 3,
        flowStageTotal: 7,
        flowStageTitle: 'Meta',
      },
    })
    const meta = normalized.meta as Record<string, unknown>
    expect(meta.flowId).toBe('meta-flow')
    expect(meta.flowStageIndex).toBe(3)
    expect(meta.flowStageTotal).toBe(7)
    expect(meta.flowStageTitle).toBe('Meta')
  })
  // Attaching a new task to an existing run is allowed only once the previous
  // task is no longer active (terminal status, or no task at all).
  it('reuses linked runs only while the existing task is still active', () => {
    expect(isActiveTaskStatus('queued')).toBe(true)
    expect(isActiveTaskStatus('processing')).toBe(true)
    expect(isActiveTaskStatus('completed')).toBe(false)
    expect(shouldAttachNewTaskToReusableRun('queued')).toBe(false)
    expect(shouldAttachNewTaskToReusableRun('processing')).toBe(false)
    expect(shouldAttachNewTaskToReusableRun('failed')).toBe(true)
    expect(shouldAttachNewTaskToReusableRun(null)).toBe(true)
  })
})
+122
View File
@@ -0,0 +1,122 @@
import { describe, expect, it, vi } from 'vitest'
import {
checkGithubReleaseUpdate,
compareSemver,
normalizeSemverTag,
shouldPulseUpdate,
} from '@/lib/update-check'
// Semver parsing/comparison helpers behind the update checker.
describe('update-check semver helpers', () => {
  it('normalizes semver tag with v prefix', () => {
    expect(normalizeSemverTag('v0.3.0')).toBe('0.3.0')
  })
  // Prerelease suffixes are stripped during normalization but the base
  // version still participates in ordering.
  it('supports prerelease suffix while comparing base semver', () => {
    expect(normalizeSemverTag('v0.3.0-rc.1')).toBe('0.3.0')
    expect(compareSemver('0.3.0-rc.1', '0.2.9')).toBe(1)
  })
  it('throws for malformed semver', () => {
    expect(() => normalizeSemverTag('0.3')).toThrowError('Invalid semver tag: 0.3')
  })
  // compareSemver follows the usual -1 / 0 / 1 comparator contract.
  it('compares semver in numeric order', () => {
    expect(compareSemver('0.3.0', '0.2.9')).toBe(1)
    expect(compareSemver('0.2.0', '0.2.0')).toBe(0)
    expect(compareSemver('0.1.9', '0.2.0')).toBe(-1)
  })
  // The update badge pulses unless the user muted exactly this version.
  it('pulses only when this version was not muted', () => {
    expect(shouldPulseUpdate('0.3.0', null)).toBe(true)
    expect(shouldPulseUpdate('0.3.0', '0.2.9')).toBe(true)
    expect(shouldPulseUpdate('0.3.0', '0.3.0')).toBe(false)
  })
})
// GitHub latest-release checking against an injected fetch implementation.
// Every result is a discriminated union on `kind`.
describe('checkGithubReleaseUpdate', () => {
  // A 404 from the releases endpoint means the repo has no releases yet.
  it('returns no-release when GitHub has no releases yet', async () => {
    const fetchMock = vi.fn<typeof fetch>().mockResolvedValue(new Response(null, { status: 404 }))
    const result = await checkGithubReleaseUpdate({
      repository: 'owner/repo',
      currentVersion: '0.2.0',
      fetchImpl: fetchMock,
    })
    expect(result).toEqual({ kind: 'no-release' })
  })
  it('returns update-available when latest release is newer', async () => {
    const fetchMock = vi.fn<typeof fetch>().mockResolvedValue(new Response(
      JSON.stringify({
        tag_name: 'v0.3.0',
        html_url: 'https://github.com/owner/repo/releases/tag/v0.3.0',
        name: 'v0.3.0',
        published_at: '2026-03-03T10:00:00Z',
      }),
      { status: 200 },
    ))
    const result = await checkGithubReleaseUpdate({
      repository: 'owner/repo',
      currentVersion: '0.2.0',
      fetchImpl: fetchMock,
    })
    expect(result.kind).toBe('update-available')
    // Narrow the union for type-safe access to the variant's fields.
    if (result.kind !== 'update-available') {
      throw new Error('expected update-available result')
    }
    expect(result.latestVersion).toBe('0.3.0')
    expect(result.release.tagName).toBe('v0.3.0')
  })
  it('returns no-update when latest release equals current version', async () => {
    const fetchMock = vi.fn<typeof fetch>().mockResolvedValue(new Response(
      JSON.stringify({
        tag_name: 'v0.2.0',
        html_url: 'https://github.com/owner/repo/releases/tag/v0.2.0',
        name: 'v0.2.0',
        published_at: '2026-03-03T10:00:00Z',
      }),
      { status: 200 },
    ))
    const result = await checkGithubReleaseUpdate({
      repository: 'owner/repo',
      currentVersion: '0.2.0',
      fetchImpl: fetchMock,
    })
    expect(result.kind).toBe('no-update')
    if (result.kind !== 'no-update') {
      throw new Error('expected no-update result')
    }
    expect(result.latestVersion).toBe('0.2.0')
  })
  // A non-semver tag surfaces as an explicit error result instead of throwing.
  it('returns error when release tag is not valid semver', async () => {
    const fetchMock = vi.fn<typeof fetch>().mockResolvedValue(new Response(
      JSON.stringify({
        tag_name: 'release-2026-03-03',
        html_url: 'https://github.com/owner/repo/releases/tag/release-2026-03-03',
      }),
      { status: 200 },
    ))
    const result = await checkGithubReleaseUpdate({
      repository: 'owner/repo',
      currentVersion: '0.2.0',
      fetchImpl: fetchMock,
    })
    expect(result.kind).toBe('error')
    if (result.kind !== 'error') {
      throw new Error('expected error result')
    }
    expect(result.reason).toBe('invalid-version')
  })
})
@@ -0,0 +1,36 @@
import { describe, expect, it } from 'vitest'
import { hasConfiguredAnalysisModel, readConfiguredAnalysisModel, shouldGuideToModelSetup } from '@/lib/workspace/model-setup'
// Guidance logic: users without a configured analysis model should be steered
// toward model setup. (Test titles are intentionally in Chinese.)
describe('workspace model setup guidance', () => {
  // A non-empty analysisModel means no setup guidance is needed.
  it('有 analysisModel -> 不需要引导设置', () => {
    const payload = {
      preference: {
        analysisModel: 'openai::gpt-4.1',
      },
    }
    expect(hasConfiguredAnalysisModel(payload)).toBe(true)
    expect(readConfiguredAnalysisModel(payload)).toBe('openai::gpt-4.1')
    expect(shouldGuideToModelSetup(payload)).toBe(false)
  })
  // A whitespace-only analysisModel counts as unconfigured.
  it('analysisModel 为空 -> 需要引导设置', () => {
    const payload = {
      preference: {
        analysisModel: ' ',
      },
    }
    expect(hasConfiguredAnalysisModel(payload)).toBe(false)
    expect(readConfiguredAnalysisModel(payload)).toBeNull()
    expect(shouldGuideToModelSetup(payload)).toBe(true)
  })
  // null or shapeless payloads also fall back to guidance.
  it('payload 非法 -> 需要引导设置', () => {
    expect(hasConfiguredAnalysisModel(null)).toBe(false)
    expect(readConfiguredAnalysisModel(null)).toBeNull()
    expect(hasConfiguredAnalysisModel({})).toBe(false)
    expect(readConfiguredAnalysisModel({})).toBeNull()
    expect(shouldGuideToModelSetup({})).toBe(true)
  })
})
+50
View File
@@ -0,0 +1,50 @@
import { describe, expect, it, vi } from 'vitest'
import { expandHomeStory } from '@/lib/home/ai-story-expand'
// Mock the task client so resolveTaskResponse can be stubbed per test case.
vi.mock('@/lib/task/client', () => ({
  resolveTaskResponse: vi.fn(),
}))
import { resolveTaskResponse } from '@/lib/task/client'
/** Wraps any serializable body in a JSON Response (HTTP 200 by default). */
function buildJsonResponse(body: unknown, status = 200): Response {
  const init = {
    status,
    headers: { 'Content-Type': 'application/json' },
  }
  return new Response(JSON.stringify(body), init)
}
describe('expandHomeStory', () => {
  // Happy path: the prompt is POSTed to the user route, the async task is
  // resolved, and its expandedText is returned as-is.
  it('posts the prompt to the user ai-story-expand route and returns expanded text', async () => {
    const apiFetch = vi.fn(async () => buildJsonResponse({ async: true, taskId: 'task-1' }))
    vi.mocked(resolveTaskResponse).mockResolvedValue({
      expandedText: '扩写后的故事正文',
    })
    const result = await expandHomeStory({
      apiFetch,
      prompt: '宫廷复仇女主回京',
    })
    expect(apiFetch).toHaveBeenCalledWith('/api/user/ai-story-expand', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        prompt: '宫廷复仇女主回京',
      }),
    })
    expect(result).toEqual({
      expandedText: '扩写后的故事正文',
    })
  })
  // A resolved task without expandedText must reject, not return undefined.
  it('fails explicitly when the route does not return expandedText', async () => {
    const apiFetch = vi.fn(async () => buildJsonResponse({ async: true, taskId: 'task-1' }))
    vi.mocked(resolveTaskResponse).mockResolvedValue({})
    await expect(expandHomeStory({
      apiFetch,
      prompt: '宫廷复仇女主回京',
    })).rejects.toThrow('AI story expand response missing expandedText')
  })
})
@@ -0,0 +1,113 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
import {
buildHomeWorkspaceLaunchTarget,
createHomeProjectLaunch,
} from '@/lib/home/create-project-launch'
/** Wraps any serializable body in a JSON Response (HTTP 200 by default). */
function buildJsonResponse(body: unknown, status = 200): Response {
  const init = {
    status,
    headers: { 'Content-Type': 'application/json' },
  }
  return new Response(JSON.stringify(body), init)
}
describe('createHomeProjectLaunch', () => {
  beforeEach(() => {
    vi.restoreAllMocks()
  })
  // Happy path: three sequential API calls (create project, patch project
  // config, create first episode), then an auto-run workspace launch target.
  it('creates project, config, first episode, and returns an auto-run workspace target', async () => {
    const apiFetch = vi
      .fn<(
        input: string,
        init?: RequestInit,
      ) => Promise<Response>>()
      .mockResolvedValueOnce(buildJsonResponse({
        project: { id: 'project-1' },
      }, 201))
      .mockResolvedValueOnce(buildJsonResponse({ success: true }, 200))
      .mockResolvedValueOnce(buildJsonResponse({
        episode: { id: 'episode-1' },
      }, 201))
    const result = await createHomeProjectLaunch({
      apiFetch,
      projectName: '开场白',
      storyText: '第一章内容',
      videoRatio: '9:16',
      artStyle: 'american-comic',
      episodeName: '第 1 集',
    })
    // 1) Project creation.
    expect(apiFetch).toHaveBeenNthCalledWith(1, '/api/projects', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        name: '开场白',
      }),
    })
    // 2) Project config patch (video ratio + art style).
    expect(apiFetch).toHaveBeenNthCalledWith(2, '/api/novel-promotion/project-1', {
      method: 'PATCH',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        videoRatio: '9:16',
        artStyle: 'american-comic',
      }),
    })
    // 3) First episode creation carrying the story text.
    expect(apiFetch).toHaveBeenNthCalledWith(3, '/api/novel-promotion/project-1/episodes', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        name: '第 1 集',
        novelText: '第一章内容',
      }),
    })
    expect(result).toEqual({
      projectId: 'project-1',
      episodeId: 'episode-1',
      target: {
        pathname: '/workspace/project-1',
        query: {
          episode: 'episode-1',
          autoRun: 'storyToScript',
        },
      },
    })
  })
  // A missing episode id in the creation response must raise, not launch.
  it('fails explicitly when first episode creation does not return an episode id', async () => {
    const apiFetch = vi
      .fn<(
        input: string,
        init?: RequestInit,
      ) => Promise<Response>>()
      .mockResolvedValueOnce(buildJsonResponse({
        project: { id: 'project-1' },
      }, 201))
      .mockResolvedValueOnce(buildJsonResponse({ success: true }, 200))
      .mockResolvedValueOnce(buildJsonResponse({
        episode: {},
      }, 201))
    await expect(createHomeProjectLaunch({
      apiFetch,
      projectName: '开场白',
      storyText: '第一章内容',
      videoRatio: '9:16',
      artStyle: 'american-comic',
      episodeName: '第 1 集',
    })).rejects.toThrow('Episode creation response missing episode id')
  })
})
describe('buildHomeWorkspaceLaunchTarget', () => {
  // The launch target deep-links to the created episode and requests the
  // story-to-script auto-run.
  it('points workspace launch to the created episode and auto-runs story-to-script', () => {
    expect(buildHomeWorkspaceLaunchTarget('project-9', 'episode-4')).toEqual({
      pathname: '/workspace/project-9',
      query: {
        episode: 'episode-4',
        autoRun: 'storyToScript',
      },
    })
  })
})
+14
View File
@@ -0,0 +1,14 @@
import { describe, expect, it } from 'vitest'
import {
AUTHENTICATED_HOME_PATHNAME,
buildAuthenticatedHomeTarget,
} from '@/lib/home/default-route'
describe('authenticated home default route', () => {
  // /home is the single canonical landing route for signed-in users.
  it('uses /home as the only authenticated default pathname', () => {
    expect(AUTHENTICATED_HOME_PATHNAME).toBe('/home')
    expect(buildAuthenticatedHomeTarget()).toEqual({
      pathname: '/home',
    })
  })
})
@@ -0,0 +1,113 @@
import * as React from 'react'
import { createElement } from 'react'
import { describe, expect, it, vi } from 'vitest'
import { renderToStaticMarkup } from 'react-dom/server'
import HomePage from '@/app/[locale]/home/page'
import {
HOME_QUICK_START_MIN_ROWS,
resolveTextareaTargetHeight,
} from '@/lib/ui/textarea-height'
// --- Module mocks for rendering HomePage with renderToStaticMarkup ---
// next-auth: pretend an authenticated session so the signed-in UI renders.
vi.mock('next-auth/react', () => ({
  useSession: () => ({
    data: { user: { name: 'Earth' } },
    status: 'authenticated',
  }),
}))
// next-intl: echo `namespace.key` so translated strings are assertable.
vi.mock('next-intl', () => ({
  useTranslations: (namespace: string) => (key: string) => `${namespace}.${key}`,
}))
vi.mock('@/components/Navbar', () => ({
  default: () => createElement('nav', null, 'Navbar'),
}))
// StoryInputComposer stub surfaces the props under test (minRows and the
// textarea class) as data attributes on a plain <section>.
vi.mock('@/components/story-input/StoryInputComposer', () => ({
  default: ({
    minRows,
    textareaClassName,
    primaryAction,
    secondaryActions,
  }: {
    minRows: number
    textareaClassName?: string
    primaryAction: React.ReactNode
    secondaryActions?: React.ReactNode
  }) => createElement(
    'section',
    {
      'data-min-rows': String(minRows),
      'data-textarea-class': textareaClassName,
    },
    secondaryActions,
    primaryAction,
    'StoryInputComposer',
  ),
}))
// Icon components collapse to simple spans carrying the icon name.
vi.mock('@/components/ui/icons', () => ({
  AppIcon: ({ name, ...props }: { name: string } & Record<string, unknown>) =>
    createElement('span', { ...props, 'data-icon': name }),
  IconGradientDefs: (props: Record<string, unknown>) => createElement('span', props),
}))
// i18n navigation: render Link as a bare anchor and stub the router.
vi.mock('@/i18n/navigation', () => ({
  Link: ({
    href,
    children,
    ...props
  }: {
    href: string | { pathname: string }
    children: React.ReactNode
  } & Record<string, unknown>) => {
    const resolvedHref = typeof href === 'string' ? href : href.pathname
    return createElement('a', { href: resolvedHref, ...props }, children)
  },
  useRouter: () => ({
    push: vi.fn(),
  }),
}))
// Network and launch helpers are stubbed out; this suite only checks markup.
vi.mock('@/lib/api-fetch', () => ({
  apiFetch: vi.fn(),
}))
vi.mock('@/lib/home/create-project-launch', () => ({
  createHomeProjectLaunch: vi.fn(),
}))
vi.mock('@/lib/home/ai-story-expand', () => ({
  expandHomeStory: vi.fn(),
}))
// Pure height math for the auto-resizing textarea.
describe('resolveTextareaTargetHeight', () => {
  // A scrollHeight below the minimum floors at minHeight.
  it('keeps the home quick-start input at least three rows tall', () => {
    expect(resolveTextareaTargetHeight({
      minHeight: 96,
      maxHeight: 320,
      scrollHeight: 54,
    })).toBe(96)
  })
  // A scrollHeight above the maximum is capped at maxHeight.
  it('caps the auto-resized height to the viewport ceiling', () => {
    expect(resolveTextareaTargetHeight({
      minHeight: 96,
      maxHeight: 180,
      scrollHeight: 240,
    })).toBe(180)
  })
})
describe('HomePage quick-start input', () => {
  it('renders the homepage textarea with a default three-row height baseline', () => {
    // Expose React on globalThis before server-rendering the page
    // (presumably needed for JSX expecting a global React — confirm).
    Reflect.set(globalThis, 'React', React)
    const html = renderToStaticMarkup(createElement(HomePage))
    expect(HOME_QUICK_START_MIN_ROWS).toBe(3)
    // The stubbed StoryInputComposer surfaces its props as data attributes.
    expect(html).toContain('StoryInputComposer')
    expect(html).toContain('data-min-rows="3"')
    expect(html).toContain('data-textarea-class="px-0 pt-0 pb-3 align-top"')
  })
})
+48
View File
@@ -0,0 +1,48 @@
import { afterEach, describe, expect, it, vi } from 'vitest'
import {
getImageGenerationCountConfig,
getImageGenerationCountOptions,
normalizeImageGenerationCount,
} from '@/lib/image-generation/count'
import {
getImageGenerationCount,
setImageGenerationCount,
} from '@/lib/image-generation/count-preference'
describe('image generation count helpers', () => {
  afterEach(() => {
    // Undo the window/localStorage stub installed below.
    vi.unstubAllGlobals()
  })
  // Counts are clamped into each scope's own valid range.
  it('normalizes values within each scope range', () => {
    expect(normalizeImageGenerationCount('character', 0)).toBe(1)
    expect(normalizeImageGenerationCount('character', 8)).toBe(6)
    expect(normalizeImageGenerationCount('storyboard-candidates', 0)).toBe(1)
    expect(normalizeImageGenerationCount('storyboard-candidates', 9)).toBe(4)
  })
  it('returns ordered options for each scope', () => {
    expect(getImageGenerationCountOptions('character')).toEqual([1, 2, 3, 4, 5, 6])
    expect(getImageGenerationCountOptions('storyboard-candidates')).toEqual([1, 2, 3, 4])
  })
  // localStorage is stubbed so each scope reads/writes its own storage key,
  // and an out-of-range write is normalized before persisting.
  it('reads and writes client preference with scope isolation', () => {
    const localStorageMock = {
      getItem: vi.fn((key: string) => {
        if (key === getImageGenerationCountConfig('character').storageKey) return '5'
        if (key === getImageGenerationCountConfig('location').storageKey) return '2'
        return null
      }),
      setItem: vi.fn(),
    }
    vi.stubGlobal('window', { localStorage: localStorageMock })
    expect(getImageGenerationCount('character')).toBe(5)
    expect(getImageGenerationCount('location')).toBe(2)
    // 8 exceeds the storyboard-candidates range and is stored as 4.
    expect(setImageGenerationCount('storyboard-candidates', 8)).toBe(4)
    expect(localStorageMock.setItem).toHaveBeenCalledWith(
      getImageGenerationCountConfig('storyboard-candidates').storageKey,
      '4',
    )
  })
})
@@ -0,0 +1,101 @@
import { describe, expect, it } from 'vitest'
import {
countGeneratedImageSlots,
resolveDisplayImageSlots,
resolveGroupedImageSlotPhase,
resolveImageSlotPhase,
shouldShowImageSlotGrid,
} from '@/lib/image-generation/slot-state'
describe('image slot state', () => {
  it('counts only slots with image urls', () => {
    expect(countGeneratedImageSlots([
      { imageUrl: 'a.png' },
      { imageUrl: null },
      { imageUrl: 'b.png' },
    ])).toBe(2)
  })
  // Phase depends on whether the slot already holds an image and whether a
  // generation task is running for it.
  it('distinguishes generate and regenerate phases', () => {
    expect(resolveImageSlotPhase({ imageUrl: null }, true)).toBe('generating')
    expect(resolveImageSlotPhase({ imageUrl: 'a.png' }, true)).toBe('regenerating')
    expect(resolveImageSlotPhase({ imageUrl: null }, false)).toBe('idle-empty')
    expect(resolveImageSlotPhase({ imageUrl: 'a.png' }, false)).toBe('idle-filled')
  })
  // In grouped generation, filled slots stay idle while sibling slots are
  // still pending; only the actively running empty slot shows as generating.
  it('keeps completed filled slots idle while the group still has empty pending slots', () => {
    expect(resolveGroupedImageSlotPhase(
      { imageUrl: 'a.png' },
      { isGroupRunning: true, isSlotRunning: false, hasPendingEmptySlots: true },
    )).toBe('idle-filled')
    expect(resolveGroupedImageSlotPhase(
      { imageUrl: null },
      { isGroupRunning: true, isSlotRunning: true, hasPendingEmptySlots: true },
    )).toBe('generating')
  })
  // Idle display trims legacy empty slots down to the filled ones.
  it('hides legacy empty slots when the location is idle', () => {
    const displaySlots = resolveDisplayImageSlots([
      { imageUrl: 'a.png' },
      { imageUrl: null },
      { imageUrl: null },
    ], {
      hasRunningTask: false,
      requestedCount: 1,
    })
    expect(displaySlots).toHaveLength(1)
    expect(displaySlots[0]?.imageUrl).toBe('a.png')
  })
  it('shows only one slot while running a single-image location generation', () => {
    const displaySlots = resolveDisplayImageSlots([
      { imageUrl: null },
      { imageUrl: null },
      { imageUrl: null },
    ], {
      hasRunningTask: true,
      requestedCount: 1,
    })
    expect(displaySlots).toHaveLength(1)
  })
  it('shows requested placeholders while running a multi-image location generation', () => {
    const displaySlots = resolveDisplayImageSlots([
      { imageUrl: 'a.png' },
      { imageUrl: null },
      { imageUrl: null },
      { imageUrl: null },
    ], {
      hasRunningTask: true,
      requestedCount: 4,
    })
    expect(displaySlots).toHaveLength(4)
  })
  // The slot grid stays hidden until a run is active or output exists.
  it('shows slot grid only after generation is active or meaningful', () => {
    expect(shouldShowImageSlotGrid({
      totalSlotCount: 3,
      generatedCount: 0,
      hasRunningTask: false,
      hasAnyError: false,
    })).toBe(false)
    expect(shouldShowImageSlotGrid({
      totalSlotCount: 3,
      generatedCount: 0,
      hasRunningTask: true,
      hasAnyError: false,
    })).toBe(true)
    expect(shouldShowImageSlotGrid({
      totalSlotCount: 3,
      generatedCount: 1,
      hasRunningTask: false,
      hasAnyError: false,
    })).toBe(true)
  })
})
+224
View File
@@ -0,0 +1,224 @@
import { afterAll, beforeEach, describe, expect, it, vi } from 'vitest'
// Mocks are created via vi.hoisted so the instances exist before the
// vi.mock factories below run (vi.mock calls are hoisted by vitest).
const resolveModelSelectionOrSingleMock = vi.hoisted(() => vi.fn())
const getProviderConfigMock = vi.hoisted(() => vi.fn())
// Mimics the real getProviderKey: strips everything after the first ':'.
const getProviderKeyMock = vi.hoisted(() => vi.fn((providerId: string) => {
  const marker = providerId.indexOf(':')
  return marker === -1 ? providerId : providerId.slice(0, marker)
}))
const submitFalTaskMock = vi.hoisted(() => vi.fn())
// Resolves site-relative media paths to an absolute localhost URL,
// matching how the app normalizes outbound media references.
const normalizeToOriginalMediaUrlMock = vi.hoisted(() => vi.fn(async (input: string) => {
  if (input.startsWith('/')) {
    return `http://localhost:3000${input}`
  }
  return input
}))
vi.mock('@/lib/api-config', () => ({
  resolveModelSelectionOrSingle: resolveModelSelectionOrSingleMock,
  getProviderConfig: getProviderConfigMock,
  getProviderKey: getProviderKeyMock,
}))
vi.mock('@/lib/async-submit', () => ({
  submitFalTask: submitFalTaskMock,
}))
vi.mock('@/lib/media/outbound-image', () => ({
  // Pass-through base64 normalization; only URL normalization is observed.
  normalizeToBase64ForGeneration: vi.fn(async (input: string) => input),
  normalizeToOriginalMediaUrl: normalizeToOriginalMediaUrlMock,
}))
vi.mock('@/lib/logging/core', () => ({
  logInfo: vi.fn(),
  logError: vi.fn(),
  createScopedLogger: vi.fn(() => ({
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
    debug: vi.fn(),
  })),
}))
import { generateLipSync } from '@/lib/lipsync'
// DashScope (Bailian) endpoints matched by the mocked fetch router below.
const POLICY_ENDPOINT = 'https://dashscope.aliyuncs.com/api/v1/uploads'
const SUBMIT_ENDPOINT = 'https://dashscope.aliyuncs.com/api/v1/services/aigc/image2video/video-synthesis'
// Upload host handed back by the mocked getPolicy response.
const UPLOAD_HOST = 'https://upload.example.com'
// Builds a minimal fetch Response stand-in that serves `payload` as JSON text.
// `ok` follows the standard 2xx convention for the given status.
function buildJsonResponse(payload: unknown, status = 200): Response {
  const response = {
    ok: status >= 200 && status < 300,
    status,
    headers: new Headers({ 'content-type': 'application/json' }),
    text: async () => JSON.stringify(payload),
  }
  return response as unknown as Response
}
// Builds a binary fetch Response stand-in whose body is the UTF-8 encoding
// of `data`; arrayBuffer() returns a fresh copy of exactly those bytes.
function buildBinaryResponse(contentType: string, data: string): Response {
  const encoded = new TextEncoder().encode(data)
  const response = {
    ok: true,
    status: 200,
    headers: new Headers({ 'content-type': contentType }),
    // Slice so only this view's bytes are exposed, not the whole backing buffer.
    arrayBuffer: async () =>
      encoded.buffer.slice(encoded.byteOffset, encoded.byteOffset + encoded.byteLength),
    text: async () => '',
  }
  return response as unknown as Response
}
describe('lip-sync bailian submit', () => {
  // Captured once so each test can restore the env var it may mutate.
  const originalNextauthUrl = process.env.NEXTAUTH_URL
  beforeEach(() => {
    vi.clearAllMocks()
    process.env.NEXTAUTH_URL = originalNextauthUrl
    // Default routing: bailian videoretalk for lipsync.
    resolveModelSelectionOrSingleMock.mockResolvedValue({
      provider: 'bailian',
      modelId: 'videoretalk',
      modelKey: 'bailian::videoretalk',
      mediaType: 'lipsync',
    })
    getProviderConfigMock.mockResolvedValue({
      id: 'bailian',
      apiKey: 'bl-key',
    })
  })
  afterAll(() => {
    process.env.NEXTAUTH_URL = originalNextauthUrl
  })
  it('uploads local media to bailian temp storage then submits oss urls', async () => {
    // Route every outbound fetch by URL: policy -> local media download ->
    // OSS upload -> async submit. Anything else is a test failure.
    const fetchMock = vi.fn(async (input: RequestInfo | URL) => {
      const url = String(input)
      if (url.startsWith(`${POLICY_ENDPOINT}?action=getPolicy&model=videoretalk`)) {
        return buildJsonResponse({
          data: {
            upload_host: UPLOAD_HOST,
            upload_dir: 'dashscope-instant/upload-dir',
            oss_access_key_id: 'ak',
            policy: 'policy',
            signature: 'sig',
          },
        })
      }
      if (url === 'http://localhost:3000/api/storage/sign?key=images%2Fdemo.mp4') {
        return buildBinaryResponse('video/mp4', 'video-bytes')
      }
      if (url === 'http://localhost:3000/api/storage/sign?key=voice%2Fdemo.wav') {
        return buildBinaryResponse('audio/wav', 'audio-bytes')
      }
      if (url === UPLOAD_HOST) {
        return {
          ok: true,
          status: 200,
          text: async () => '',
        } as unknown as Response
      }
      if (url === SUBMIT_ENDPOINT) {
        return buildJsonResponse({
          output: {
            task_id: 'task-123',
            task_status: 'PENDING',
          },
        })
      }
      throw new Error(`unexpected fetch: ${url}`)
    })
    vi.stubGlobal('fetch', fetchMock as unknown as typeof fetch)
    const result = await generateLipSync(
      {
        videoUrl: '/api/storage/sign?key=images%2Fdemo.mp4',
        audioUrl: '/api/storage/sign?key=voice%2Fdemo.wav',
        audioDurationMs: 3000,
        videoDurationMs: 5000,
      },
      'user-1',
      'bailian::videoretalk',
    )
    // Model routing and provider credentials were resolved for this user.
    expect(resolveModelSelectionOrSingleMock).toHaveBeenCalledWith('user-1', 'bailian::videoretalk', 'lipsync')
    expect(getProviderConfigMock).toHaveBeenCalledWith('user-1', 'bailian')
    // Both local media references were normalized before upload.
    expect(normalizeToOriginalMediaUrlMock).toHaveBeenCalledWith('/api/storage/sign?key=images%2Fdemo.mp4')
    expect(normalizeToOriginalMediaUrlMock).toHaveBeenCalledWith('/api/storage/sign?key=voice%2Fdemo.wav')
    // Inspect the actual submit request captured by the fetch mock.
    const submitCall = fetchMock.mock.calls.find(([input]) => String(input) === SUBMIT_ENDPOINT) as
      | [RequestInfo | URL, RequestInit?]
      | undefined
    expect(submitCall).toBeDefined()
    const submitInit = submitCall?.[1]
    expect(submitInit).toBeDefined()
    if (!submitInit) throw new Error('missing submit init')
    expect(submitInit.method).toBe('POST')
    // Async + OSS-resolve headers are required for DashScope task submission.
    expect(submitInit.headers).toEqual({
      Authorization: 'Bearer bl-key',
      'Content-Type': 'application/json',
      'X-DashScope-Async': 'enable',
      'X-DashScope-OssResourceResolve': 'enable',
    })
    const submitBody = JSON.parse(String(submitInit.body)) as {
      model: string
      input: { video_url: string; audio_url: string }
    }
    expect(submitBody.model).toBe('videoretalk')
    // Uploaded media must be referenced as oss:// urls under the policy dir.
    expect(submitBody.input.video_url).toMatch(/^oss:\/\/dashscope-instant\/upload-dir\/video-/)
    expect(submitBody.input.audio_url).toMatch(/^oss:\/\/dashscope-instant\/upload-dir\/audio-/)
    // One upload per media file (video + audio).
    const uploadCalls = fetchMock.mock.calls.filter(([input]) => String(input) === UPLOAD_HOST)
    expect(uploadCalls.length).toBe(2)
    expect(result).toEqual({
      requestId: 'task-123',
      externalId: 'BAILIAN:VIDEO:task-123',
      async: true,
    })
  })
  it('throws explicit error when bailian task id is missing', async () => {
    // Same routing as above, but the submit response omits task_id.
    const fetchMock = vi.fn(async (input: RequestInfo | URL) => {
      const url = String(input)
      if (url.startsWith(`${POLICY_ENDPOINT}?action=getPolicy&model=videoretalk`)) {
        return buildJsonResponse({
          data: {
            upload_host: UPLOAD_HOST,
            upload_dir: 'dashscope-instant/upload-dir',
            oss_access_key_id: 'ak',
            policy: 'policy',
            signature: 'sig',
          },
        })
      }
      if (url === UPLOAD_HOST) {
        return {
          ok: true,
          status: 200,
          text: async () => '',
        } as unknown as Response
      }
      if (url === SUBMIT_ENDPOINT) {
        return buildJsonResponse({
          output: {
            task_status: 'PENDING',
          },
        })
      }
      throw new Error(`unexpected fetch: ${url}`)
    })
    vi.stubGlobal('fetch', fetchMock as unknown as typeof fetch)
    // data: URLs skip the local-download step but still upload to OSS.
    await expect(generateLipSync(
      {
        videoUrl: 'data:video/mp4;base64,dmk=',
        audioUrl: 'data:audio/wav;base64,YXU=',
        audioDurationMs: 3000,
        videoDurationMs: 5000,
      },
      'user-1',
      'bailian::videoretalk',
    )).rejects.toThrow('BAILIAN_LIPSYNC_TASK_ID_MISSING')
  })
})
+199
View File
@@ -0,0 +1,199 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Hoisted mock instances so the vi.mock factories below can reference them.
const normalizeToOriginalMediaUrlMock = vi.hoisted(() => vi.fn(async (input: string) => input))
// Processed-audio upload always lands at a fixed storage key in these tests.
const uploadObjectMock = vi.hoisted(() => vi.fn(async () => 'voice/temp/lip-sync-preprocessed/test.wav'))
const getSignedUrlMock = vi.hoisted(() => vi.fn(() => '/api/storage/sign?key=voice%2Ftemp%2Flip-sync-preprocessed%2Ftest.wav'))
// Mirrors the real toFetchableUrl: absolute/data URLs pass through,
// site-relative paths are prefixed with the public host.
const toFetchableUrlMock = vi.hoisted(() => vi.fn((input: string) => {
  if (input.startsWith('http://') || input.startsWith('https://') || input.startsWith('data:')) return input
  if (input.startsWith('/')) return `https://public.example.com${input}`
  return input
}))
vi.mock('@/lib/media/outbound-image', () => ({
  normalizeToOriginalMediaUrl: normalizeToOriginalMediaUrlMock,
}))
vi.mock('@/lib/storage', () => ({
  uploadObject: uploadObjectMock,
  getSignedUrl: getSignedUrlMock,
}))
vi.mock('@/lib/storage/utils', () => ({
  toFetchableUrl: toFetchableUrlMock,
}))
vi.mock('@/lib/logging/core', () => ({
  logInfo: vi.fn(),
}))
import {
LIPSYNC_PREPROCESS_AUDIO_MIN_MS,
preprocessLipSyncParams,
} from '@/lib/lipsync/preprocess'
// Builds a minimal valid mono 16-bit PCM WAV file of roughly `durationMs`
// of silence (zeroed samples). Header layout: RIFF + fmt + data chunks.
function buildWav(durationMs: number, sampleRate = 16000): Buffer {
  const numChannels = 1
  const bitsPerSample = 16
  const blockAlign = (numChannels * bitsPerSample) / 8
  const byteRate = sampleRate * blockAlign
  // Size the data chunk in whole sample frames (at least one) so it is always
  // a multiple of blockAlign. The previous byte-level rounding could produce
  // an odd byte count for fractional durations, i.e. a malformed PCM chunk.
  const frameCount = Math.max(1, Math.round((durationMs / 1000) * sampleRate))
  const dataSize = frameCount * blockAlign
  const buffer = Buffer.alloc(44 + dataSize)
  // RIFF container header; size field excludes the first 8 bytes.
  buffer.write('RIFF', 0, 'ascii')
  buffer.writeUInt32LE(36 + dataSize, 4)
  buffer.write('WAVE', 8, 'ascii')
  // fmt chunk: 16-byte PCM format description.
  buffer.write('fmt ', 12, 'ascii')
  buffer.writeUInt32LE(16, 16)
  buffer.writeUInt16LE(1, 20) // audio format 1 = uncompressed PCM
  buffer.writeUInt16LE(numChannels, 22)
  buffer.writeUInt32LE(sampleRate, 24)
  buffer.writeUInt32LE(byteRate, 28)
  buffer.writeUInt16LE(blockAlign, 32)
  buffer.writeUInt16LE(bitsPerSample, 34)
  // data chunk: header + zero-filled samples (Buffer.alloc zeroes memory).
  buffer.write('data', 36, 'ascii')
  buffer.writeUInt32LE(dataSize, 40)
  return buffer
}
// Builds a minimal MP4: an ftyp box followed by a moov box containing a
// version-0 mvhd with the given duration (timescale 1000 => ms units).
// Only the fields a duration probe reads are populated; the rest stay zero.
function buildMp4WithDuration(durationMs: number): Buffer {
  const timescale = 1000
  const duration = Math.max(1, Math.round(durationMs))
  // ftyp: major brand isom, minor version 0x200, compatible brands isom/mp41.
  const ftyp = Buffer.alloc(24)
  ftyp.writeUInt32BE(24, 0)
  ftyp.write('ftyp', 4, 'ascii')
  ftyp.write('isom', 8, 'ascii')
  ftyp.writeUInt32BE(0x200, 12)
  ftyp.write('isom', 16, 'ascii')
  ftyp.write('mp41', 20, 'ascii')
  // mvhd payload: version/flags, creation, modification, timescale, duration.
  // Buffer.alloc zero-fills, so version/flags/creation/modification stay 0.
  const mvhd = Buffer.alloc(8 + 20)
  mvhd.writeUInt32BE(mvhd.length, 0)
  mvhd.write('mvhd', 4, 'ascii')
  mvhd.writeUInt32BE(timescale, 8 + 12)
  mvhd.writeUInt32BE(duration, 8 + 16)
  // moov wraps the mvhd box.
  const moov = Buffer.alloc(8 + mvhd.length)
  moov.writeUInt32BE(moov.length, 0)
  moov.write('moov', 4, 'ascii')
  mvhd.copy(moov, 8)
  return Buffer.concat([ftyp, moov])
}
// Reads back a WAV duration in ms from the byte-rate (offset 28) and
// data-chunk size (offset 40) fields of a canonical 44-byte header.
function readWavDurationMs(buffer: Buffer): number {
  const bytesPerSecond = buffer.readUInt32LE(28)
  const dataChunkBytes = buffer.readUInt32LE(40)
  return Math.round((dataChunkBytes / bytesPerSecond) * 1000)
}
// Wraps a Buffer as a minimal fetch Response stand-in; arrayBuffer() yields
// a copy of exactly this Buffer's bytes (not its whole backing ArrayBuffer).
function buildBinaryResponse(buffer: Buffer, contentType: string): Response {
  const toArrayBuffer = async () =>
    buffer.buffer.slice(buffer.byteOffset, buffer.byteOffset + buffer.byteLength)
  const response = {
    ok: true,
    status: 200,
    headers: new Headers({ 'content-type': contentType }),
    arrayBuffer: toArrayBuffer,
    text: async () => '',
  }
  return response as unknown as Response
}
describe('lipsync preprocess', () => {
  beforeEach(() => {
    // Reset call history so mock-usage assertions are isolated per test.
    vi.clearAllMocks()
  })
  it('pads short audio to minimum duration for fal', async () => {
    // 1s audio is below fal's minimum; expect in-memory padding (no upload).
    const shortAudio = buildWav(1000)
    const video = buildMp4WithDuration(5000)
    const fetchMock = vi.fn(async (input: RequestInfo | URL) => {
      const url = String(input)
      if (url.includes('video.mp4')) return buildBinaryResponse(video, 'video/mp4')
      if (url.includes('audio.wav')) return buildBinaryResponse(shortAudio, 'audio/wav')
      throw new Error(`unexpected fetch: ${url}`)
    })
    vi.stubGlobal('fetch', fetchMock as unknown as typeof fetch)
    const result = await preprocessLipSyncParams(
      {
        videoUrl: 'https://assets.example.com/video.mp4',
        audioUrl: 'https://assets.example.com/audio.wav',
        audioDurationMs: 1000,
      },
      { providerKey: 'fal' },
    )
    expect(result.paddedAudio).toBe(true)
    expect(result.trimmedAudio).toBe(false)
    // fal path returns the padded audio inline as a data URL.
    expect(result.params.audioUrl.startsWith('data:audio/wav;base64,')).toBe(true)
    const base64 = result.params.audioUrl.slice('data:audio/wav;base64,'.length)
    const paddedBuffer = Buffer.from(base64, 'base64')
    expect(readWavDurationMs(paddedBuffer)).toBeGreaterThanOrEqual(LIPSYNC_PREPROCESS_AUDIO_MIN_MS)
    expect(uploadObjectMock).not.toHaveBeenCalled()
  })
  it('trims audio to video duration for vidu and uploads processed audio', async () => {
    // 7s audio over a 5s video; vidu requires trimming plus re-upload.
    const longAudio = buildWav(7000)
    const video = buildMp4WithDuration(5000)
    const fetchMock = vi.fn(async (input: RequestInfo | URL) => {
      const url = String(input)
      if (url.includes('video.mp4')) return buildBinaryResponse(video, 'video/mp4')
      if (url.includes('audio.wav')) return buildBinaryResponse(longAudio, 'audio/wav')
      throw new Error(`unexpected fetch: ${url}`)
    })
    vi.stubGlobal('fetch', fetchMock as unknown as typeof fetch)
    const result = await preprocessLipSyncParams(
      {
        videoUrl: 'https://assets.example.com/video.mp4',
        audioUrl: 'https://assets.example.com/audio.wav',
        audioDurationMs: 7000,
      },
      { providerKey: 'vidu' },
    )
    expect(result.paddedAudio).toBe(false)
    expect(result.trimmedAudio).toBe(true)
    expect(uploadObjectMock).toHaveBeenCalledTimes(1)
    // Inspect the uploaded buffer: it must not exceed the video duration.
    const uploadCall = uploadObjectMock.mock.calls[0] as unknown as [Buffer] | undefined
    expect(uploadCall).toBeTruthy()
    if (!uploadCall) throw new Error('expected uploadObject call')
    const uploadedBuffer = uploadCall[0]
    expect(readWavDurationMs(uploadedBuffer)).toBeLessThanOrEqual(5000)
    // Final audio URL = signed storage key made fetchable via the public host.
    expect(result.params.audioUrl).toBe('https://public.example.com/api/storage/sign?key=voice%2Ftemp%2Flip-sync-preprocessed%2Ftest.wav')
  })
  it('probes durations and keeps audio unchanged when no adjustment is needed', async () => {
    // 3s audio within a 5s video and no explicit durations given:
    // preprocess must probe via fetch but leave the audio URL untouched.
    const audio = buildWav(3000)
    const video = buildMp4WithDuration(5000)
    const fetchMock = vi.fn(async (input: RequestInfo | URL) => {
      const url = String(input)
      if (url.includes('video.mp4')) return buildBinaryResponse(video, 'video/mp4')
      if (url.includes('audio.wav')) return buildBinaryResponse(audio, 'audio/wav')
      throw new Error(`unexpected fetch: ${url}`)
    })
    vi.stubGlobal('fetch', fetchMock as unknown as typeof fetch)
    const result = await preprocessLipSyncParams(
      {
        videoUrl: 'https://assets.example.com/video.mp4',
        audioUrl: 'https://assets.example.com/audio.wav',
      },
      { providerKey: 'bailian' },
    )
    expect(result.paddedAudio).toBe(false)
    expect(result.trimmedAudio).toBe(false)
    expect(result.params.audioUrl).toBe('https://assets.example.com/audio.wav')
    expect(fetchMock).toHaveBeenCalled()
    expect(uploadObjectMock).not.toHaveBeenCalled()
  })
})
+22
View File
@@ -0,0 +1,22 @@
import { describe, expect, it } from 'vitest'
import { buildArkThinkingParam } from '@/lib/ark-llm'
// Verifies the Ark thinking-parameter builder emits only the thinking type
// flag (no reasoning_effort field) for both enabled and disabled states.
describe('ark thinking param builder', () => {
  it('builds enabled thinking param without reasoning_effort', () => {
    const enabledParams = buildArkThinkingParam('doubao-seed-2-0-lite-260215', true)
    expect(enabledParams).toEqual({ thinking: { type: 'enabled' } })
  })
  it('builds disabled thinking param without reasoning_effort', () => {
    const disabledParams = buildArkThinkingParam('doubao-seed-2-0-lite-260215', false)
    expect(disabledParams).toEqual({ thinking: { type: 'disabled' } })
  })
})
@@ -0,0 +1,124 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Hoisted so the mock instances exist before vitest hoists the vi.mock calls.
// Default runtime model routes to the bailian official provider.
const resolveLlmRuntimeModelMock = vi.hoisted(() =>
  vi.fn(async () => ({
    provider: 'bailian',
    modelId: 'qwen3.5-flash',
    modelKey: 'bailian::qwen3.5-flash',
  })),
)
// Canned OpenAI-style completion returned by the bailian executor.
const completeBailianLlmMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'chatcmpl_mock',
    object: 'chat.completion',
    created: 1,
    model: 'qwen3.5-flash',
    choices: [
      {
        index: 0,
        message: { role: 'assistant', content: 'ok' },
        finish_reason: 'stop',
      },
    ],
    usage: {
      prompt_tokens: 1,
      completion_tokens: 1,
      total_tokens: 2,
    },
  })),
)
// These executors must never be reached on the official bailian branch;
// throwing makes any accidental fall-through fail loudly.
const completeSiliconFlowLlmMock = vi.hoisted(() =>
  vi.fn(async () => {
    throw new Error('siliconflow should not be called')
  }),
)
const runOpenAICompatChatCompletionMock = vi.hoisted(() =>
  vi.fn(async () => {
    throw new Error('openai-compat should not be called')
  }),
)
const getProviderConfigMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'bailian',
    name: 'Alibaba Bailian',
    apiKey: 'bl-key',
    baseUrl: undefined,
    gatewayRoute: 'official' as const,
  })),
)
// Observability hooks captured so the test can assert usage recording.
const llmLoggerInfoMock = vi.hoisted(() => vi.fn())
const llmLoggerWarnMock = vi.hoisted(() => vi.fn())
const logLlmRawInputMock = vi.hoisted(() => vi.fn())
const logLlmRawOutputMock = vi.hoisted(() => vi.fn())
const recordCompletionUsageMock = vi.hoisted(() => vi.fn())
vi.mock('@/lib/llm-observe/internal-stream-context', () => ({
  getInternalLLMStreamCallbacks: vi.fn(() => null),
}))
vi.mock('@/lib/model-gateway', () => ({
  resolveModelGatewayRoute: vi.fn(() => 'official'),
  runOpenAICompatChatCompletion: runOpenAICompatChatCompletionMock,
}))
vi.mock('@/lib/api-config', () => ({
  getProviderConfig: getProviderConfigMock,
  getProviderKey: vi.fn((providerId: string) => providerId),
}))
vi.mock('@/lib/providers/bailian', () => ({
  completeBailianLlm: completeBailianLlmMock,
}))
vi.mock('@/lib/providers/siliconflow', () => ({
  completeSiliconFlowLlm: completeSiliconFlowLlmMock,
}))
vi.mock('@/lib/llm/runtime-shared', () => ({
  _ulogError: vi.fn(),
  _ulogWarn: vi.fn(),
  completionUsageSummary: vi.fn(() => ({ promptTokens: 1, completionTokens: 1 })),
  isRetryableError: vi.fn(() => false),
  llmLogger: {
    info: llmLoggerInfoMock,
    warn: llmLoggerWarnMock,
  },
  logLlmRawInput: logLlmRawInputMock,
  logLlmRawOutput: logLlmRawOutputMock,
  recordCompletionUsage: recordCompletionUsageMock,
  resolveLlmRuntimeModel: resolveLlmRuntimeModelMock,
}))
import { chatCompletion } from '@/lib/llm/chat-completion'
describe('llm chatCompletion official provider branch', () => {
beforeEach(() => {
vi.clearAllMocks()
})
it('returns completion from bailian official provider without falling through to baseUrl checks', async () => {
const result = await chatCompletion(
'user-1',
'bailian::qwen3.5-flash',
[{ role: 'user', content: 'hello' }],
{ temperature: 0.1 },
)
expect(completeBailianLlmMock).toHaveBeenCalledWith({
modelId: 'qwen3.5-flash',
messages: [{ role: 'user', content: 'hello' }],
apiKey: 'bl-key',
baseUrl: undefined,
temperature: 0.1,
})
expect(runOpenAICompatChatCompletionMock).not.toHaveBeenCalled()
expect(completeSiliconFlowLlmMock).not.toHaveBeenCalled()
expect(result.choices[0]?.message?.content).toBe('ok')
expect(recordCompletionUsageMock).toHaveBeenCalledTimes(1)
})
})
@@ -0,0 +1,158 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Shape of the runtime-model record the tests override per case; llmProtocol
// drives which openai-compat executor chatCompletion selects.
type MockRuntimeModel = {
  provider: string
  modelId: string
  modelKey: string
  llmProtocol: 'responses' | 'chat-completions' | undefined
}
// Default routing: openai-compatible node with the responses protocol.
const resolveLlmRuntimeModelMock = vi.hoisted(() =>
  vi.fn<(...args: unknown[]) => Promise<MockRuntimeModel>>(async () => ({
    provider: 'openai-compatible:node-1',
    modelId: 'gpt-4.1-mini',
    modelKey: 'openai-compatible:node-1::gpt-4.1-mini',
    llmProtocol: 'responses',
  })),
)
// Distinct canned contents let assertions tell which executor produced
// the completion ('responses-ok' vs 'chat-ok').
const runOpenAICompatResponsesCompletionMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'chatcmpl_responses_1',
    object: 'chat.completion',
    created: 1,
    model: 'gpt-4.1-mini',
    choices: [{ index: 0, message: { role: 'assistant', content: 'responses-ok' }, finish_reason: 'stop' }],
    usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 },
  })),
)
const runOpenAICompatChatCompletionMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'chatcmpl_chat_1',
    object: 'chat.completion',
    created: 1,
    model: 'gpt-4.1-mini',
    choices: [{ index: 0, message: { role: 'assistant', content: 'chat-ok' }, finish_reason: 'stop' }],
    usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 },
  })),
)
const getProviderConfigMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'openai-compatible:node-1',
    name: 'OpenAI Compatible',
    apiKey: 'sk-test',
    baseUrl: 'https://compat.example.com/v1',
    gatewayRoute: 'openai-compat' as const,
    apiMode: 'openai-official' as const,
  })),
)
const logLlmRawInputMock = vi.hoisted(() => vi.fn())
const logLlmRawOutputMock = vi.hoisted(() => vi.fn())
const recordCompletionUsageMock = vi.hoisted(() => vi.fn())
vi.mock('@/lib/llm-observe/internal-stream-context', () => ({
  getInternalLLMStreamCallbacks: vi.fn(() => null),
}))
vi.mock('@/lib/model-gateway', () => ({
  resolveModelGatewayRoute: vi.fn(() => 'openai-compat'),
  runOpenAICompatChatCompletion: runOpenAICompatChatCompletionMock,
  runOpenAICompatResponsesCompletion: runOpenAICompatResponsesCompletionMock,
}))
vi.mock('@/lib/api-config', () => ({
  getProviderConfig: getProviderConfigMock,
  // Provider key is the segment before the first ':'.
  getProviderKey: vi.fn((providerId: string) => providerId.split(':')[0] || providerId),
}))
// Official-provider executors must never run on the openai-compat route.
vi.mock('@/lib/providers/bailian', () => ({
  completeBailianLlm: vi.fn(async () => {
    throw new Error('bailian should not be called')
  }),
}))
vi.mock('@/lib/providers/siliconflow', () => ({
  completeSiliconFlowLlm: vi.fn(async () => {
    throw new Error('siliconflow should not be called')
  }),
}))
vi.mock('@/lib/llm/runtime-shared', () => ({
  _ulogError: vi.fn(),
  _ulogWarn: vi.fn(),
  completionUsageSummary: vi.fn(() => ({ promptTokens: 1, completionTokens: 1 })),
  isRetryableError: vi.fn(() => false),
  llmLogger: {
    info: vi.fn(),
    warn: vi.fn(),
  },
  logLlmRawInput: logLlmRawInputMock,
  logLlmRawOutput: logLlmRawOutputMock,
  recordCompletionUsage: recordCompletionUsageMock,
  resolveLlmRuntimeModel: resolveLlmRuntimeModelMock,
}))
import { chatCompletion } from '@/lib/llm/chat-completion'
describe('llm chatCompletion openai-compatible protocol routing', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })
  it('uses responses executor when llmProtocol=responses', async () => {
    // Default mock already resolves llmProtocol: 'responses'.
    const completion = await chatCompletion(
      'user-1',
      'openai-compatible:node-1::gpt-4.1-mini',
      [{ role: 'user', content: 'hello' }],
      { temperature: 0.2 },
    )
    expect(runOpenAICompatResponsesCompletionMock).toHaveBeenCalledTimes(1)
    expect(runOpenAICompatChatCompletionMock).not.toHaveBeenCalled()
    expect(completion.choices[0]?.message?.content).toBe('responses-ok')
  })
  it('uses chat-completions executor when llmProtocol=chat-completions', async () => {
    // Override routing for this call only.
    resolveLlmRuntimeModelMock.mockResolvedValueOnce({
      provider: 'openai-compatible:node-1',
      modelId: 'gpt-4.1-mini',
      modelKey: 'openai-compatible:node-1::gpt-4.1-mini',
      llmProtocol: 'chat-completions',
    })
    const completion = await chatCompletion(
      'user-1',
      'openai-compatible:node-1::gpt-4.1-mini',
      [{ role: 'user', content: 'hello' }],
      { temperature: 0.2 },
    )
    expect(runOpenAICompatChatCompletionMock).toHaveBeenCalledTimes(1)
    expect(runOpenAICompatResponsesCompletionMock).not.toHaveBeenCalled()
    expect(completion.choices[0]?.message?.content).toBe('chat-ok')
  })
  it('fails fast when llmProtocol is missing for openai-compatible model', async () => {
    resolveLlmRuntimeModelMock.mockResolvedValueOnce({
      provider: 'openai-compatible:node-1',
      modelId: 'gpt-4.1-mini',
      modelKey: 'openai-compatible:node-1::gpt-4.1-mini',
      llmProtocol: undefined,
    })
    // maxRetries: 0 so the rejection surfaces immediately without retry loops.
    await expect(
      chatCompletion(
        'user-1',
        'openai-compatible:node-1::gpt-4.1-mini',
        [{ role: 'user', content: 'hello' }],
        { temperature: 0.2, maxRetries: 0 },
      ),
    ).rejects.toThrow('MODEL_LLM_PROTOCOL_REQUIRED')
    // Neither executor may have been invoked.
    expect(runOpenAICompatChatCompletionMock).not.toHaveBeenCalled()
    expect(runOpenAICompatResponsesCompletionMock).not.toHaveBeenCalled()
  })
})
@@ -0,0 +1,129 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Hoisted mock instances referenced by the vi.mock factories below.
// Default runtime model routes the stream call to the bailian official provider.
const resolveLlmRuntimeModelMock = vi.hoisted(() =>
  vi.fn(async () => ({
    provider: 'bailian',
    modelId: 'qwen3.5-plus',
    modelKey: 'bailian::qwen3.5-plus',
  })),
)
// Non-streaming canned completion: the stream wrapper should replay its
// content as a single chunk and complete.
const completeBailianLlmMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'chatcmpl_stream_mock',
    object: 'chat.completion',
    created: 1,
    model: 'qwen3.5-plus',
    choices: [
      {
        index: 0,
        message: { role: 'assistant', content: 'stream-ok' },
        finish_reason: 'stop',
      },
    ],
    usage: {
      prompt_tokens: 2,
      completion_tokens: 2,
      total_tokens: 4,
    },
  })),
)
// Fallback executors must never be reached; throwing exposes fall-through.
const completeSiliconFlowLlmMock = vi.hoisted(() =>
  vi.fn(async () => {
    throw new Error('siliconflow should not be called')
  }),
)
const runOpenAICompatChatCompletionMock = vi.hoisted(() =>
  vi.fn(async () => {
    throw new Error('openai-compat should not be called')
  }),
)
const getProviderConfigMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'bailian',
    name: 'Alibaba Bailian',
    apiKey: 'bl-key',
    baseUrl: undefined,
    gatewayRoute: 'official' as const,
  })),
)
const logLlmRawInputMock = vi.hoisted(() => vi.fn())
const logLlmRawOutputMock = vi.hoisted(() => vi.fn())
const recordCompletionUsageMock = vi.hoisted(() => vi.fn())
vi.mock('@/lib/model-gateway', () => ({
  resolveModelGatewayRoute: vi.fn(() => 'official'),
  runOpenAICompatChatCompletion: runOpenAICompatChatCompletionMock,
}))
vi.mock('@/lib/api-config', () => ({
  getProviderConfig: getProviderConfigMock,
  getProviderKey: vi.fn((providerId: string) => providerId),
}))
vi.mock('@/lib/providers/bailian', () => ({
  completeBailianLlm: completeBailianLlmMock,
}))
vi.mock('@/lib/providers/siliconflow', () => ({
  completeSiliconFlowLlm: completeSiliconFlowLlmMock,
}))
vi.mock('@/lib/llm/runtime-shared', () => ({
  completionUsageSummary: vi.fn(() => ({ promptTokens: 2, completionTokens: 2 })),
  llmLogger: {
    info: vi.fn(),
    warn: vi.fn(),
  },
  logLlmRawInput: logLlmRawInputMock,
  logLlmRawOutput: logLlmRawOutputMock,
  recordCompletionUsage: recordCompletionUsageMock,
  resolveLlmRuntimeModel: resolveLlmRuntimeModelMock,
}))
import { chatCompletionStream } from '@/lib/llm/chat-stream'
// The official bailian branch of the streaming API should resolve the whole
// completion eagerly, replay it through the stream callbacks, and skip the
// openai-compat / siliconflow executors entirely.
describe('llm chatCompletionStream official provider branch', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })
  it('streams from bailian completion result and exits early', async () => {
    const onChunk = vi.fn()
    const onComplete = vi.fn()
    const completion = await chatCompletionStream(
      'user-1',
      'bailian::qwen3.5-plus',
      [{ role: 'user', content: 'hello' }],
      {},
      { onChunk, onComplete },
    )
    const expectedBailianCall = {
      modelId: 'qwen3.5-plus',
      messages: [{ role: 'user', content: 'hello' }],
      apiKey: 'bl-key',
      baseUrl: undefined,
      temperature: 0.7,
    }
    expect(completeBailianLlmMock).toHaveBeenCalledWith(expectedBailianCall)
    expect(runOpenAICompatChatCompletionMock).not.toHaveBeenCalled()
    expect(completeSiliconFlowLlmMock).not.toHaveBeenCalled()
    expect(onComplete).toHaveBeenCalledWith('stream-ok', undefined)
    expect(onChunk).toHaveBeenCalledWith(
      expect.objectContaining({ kind: 'text', delta: 'stream-ok' }),
    )
    expect(completion.choices[0]?.message?.content).toBe('stream-ok')
    expect(recordCompletionUsageMock).toHaveBeenCalledTimes(1)
  })
})
@@ -0,0 +1,156 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Shape of the runtime-model record the tests override per case; llmProtocol
// selects which openai-compat executor the stream wrapper dispatches to.
type MockRuntimeModel = {
  provider: string
  modelId: string
  modelKey: string
  llmProtocol: 'responses' | 'chat-completions' | undefined
}
// Default routing: openai-compatible node with the responses protocol.
const resolveLlmRuntimeModelMock = vi.hoisted(() =>
  vi.fn<(...args: unknown[]) => Promise<MockRuntimeModel>>(async () => ({
    provider: 'openai-compatible:node-1',
    modelId: 'gpt-4.1-mini',
    modelKey: 'openai-compatible:node-1::gpt-4.1-mini',
    llmProtocol: 'responses',
  })),
)
// Distinct contents ('responses-stream' vs 'chat-stream') identify which
// executor actually produced the completion.
const runOpenAICompatResponsesCompletionMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'chatcmpl_responses_1',
    object: 'chat.completion',
    created: 1,
    model: 'gpt-4.1-mini',
    choices: [{ index: 0, message: { role: 'assistant', content: 'responses-stream' }, finish_reason: 'stop' }],
    usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 },
  })),
)
const runOpenAICompatChatCompletionMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'chatcmpl_chat_1',
    object: 'chat.completion',
    created: 1,
    model: 'gpt-4.1-mini',
    choices: [{ index: 0, message: { role: 'assistant', content: 'chat-stream' }, finish_reason: 'stop' }],
    usage: { prompt_tokens: 1, completion_tokens: 1, total_tokens: 2 },
  })),
)
const getProviderConfigMock = vi.hoisted(() =>
  vi.fn(async () => ({
    id: 'openai-compatible:node-1',
    name: 'OpenAI Compatible',
    apiKey: 'sk-test',
    baseUrl: 'https://compat.example.com/v1',
    gatewayRoute: 'openai-compat' as const,
    apiMode: 'openai-official' as const,
  })),
)
const logLlmRawInputMock = vi.hoisted(() => vi.fn())
const logLlmRawOutputMock = vi.hoisted(() => vi.fn())
const recordCompletionUsageMock = vi.hoisted(() => vi.fn())
vi.mock('@/lib/model-gateway', () => ({
  resolveModelGatewayRoute: vi.fn(() => 'openai-compat'),
  runOpenAICompatChatCompletion: runOpenAICompatChatCompletionMock,
  runOpenAICompatResponsesCompletion: runOpenAICompatResponsesCompletionMock,
}))
vi.mock('@/lib/api-config', () => ({
  getProviderConfig: getProviderConfigMock,
  // Provider key is the segment before the first ':'.
  getProviderKey: vi.fn((providerId: string) => providerId.split(':')[0] || providerId),
}))
// Official-provider executors must never run on the openai-compat route.
vi.mock('@/lib/providers/bailian', () => ({
  completeBailianLlm: vi.fn(async () => {
    throw new Error('bailian should not be called')
  }),
}))
vi.mock('@/lib/providers/siliconflow', () => ({
  completeSiliconFlowLlm: vi.fn(async () => {
    throw new Error('siliconflow should not be called')
  }),
}))
vi.mock('@/lib/llm/runtime-shared', () => ({
  completionUsageSummary: vi.fn(() => ({ promptTokens: 1, completionTokens: 1 })),
  llmLogger: {
    info: vi.fn(),
    warn: vi.fn(),
  },
  logLlmRawInput: logLlmRawInputMock,
  logLlmRawOutput: logLlmRawOutputMock,
  recordCompletionUsage: recordCompletionUsageMock,
  resolveLlmRuntimeModel: resolveLlmRuntimeModelMock,
}))
import { chatCompletionStream } from '@/lib/llm/chat-stream'
describe('llm chatCompletionStream openai-compatible protocol routing', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })
  it('uses responses executor when llmProtocol=responses', async () => {
    // Default mock already resolves llmProtocol: 'responses'.
    const onChunk = vi.fn()
    const completion = await chatCompletionStream(
      'user-1',
      'openai-compatible:node-1::gpt-4.1-mini',
      [{ role: 'user', content: 'hello' }],
      { temperature: 0.2 },
      { onChunk },
    )
    expect(runOpenAICompatResponsesCompletionMock).toHaveBeenCalledTimes(1)
    expect(runOpenAICompatChatCompletionMock).not.toHaveBeenCalled()
    expect(completion.choices[0]?.message?.content).toBe('responses-stream')
    expect(onChunk).toHaveBeenCalled()
  })
  it('uses chat-completions executor when llmProtocol=chat-completions', async () => {
    // Override routing for this call only; no callbacks supplied.
    resolveLlmRuntimeModelMock.mockResolvedValueOnce({
      provider: 'openai-compatible:node-1',
      modelId: 'gpt-4.1-mini',
      modelKey: 'openai-compatible:node-1::gpt-4.1-mini',
      llmProtocol: 'chat-completions',
    })
    const completion = await chatCompletionStream(
      'user-1',
      'openai-compatible:node-1::gpt-4.1-mini',
      [{ role: 'user', content: 'hello' }],
      { temperature: 0.2 },
      undefined,
    )
    expect(runOpenAICompatChatCompletionMock).toHaveBeenCalledTimes(1)
    expect(runOpenAICompatResponsesCompletionMock).not.toHaveBeenCalled()
    expect(completion.choices[0]?.message?.content).toBe('chat-stream')
  })
  it('fails fast when llmProtocol is missing for openai-compatible model', async () => {
    resolveLlmRuntimeModelMock.mockResolvedValueOnce({
      provider: 'openai-compatible:node-1',
      modelId: 'gpt-4.1-mini',
      modelKey: 'openai-compatible:node-1::gpt-4.1-mini',
      llmProtocol: undefined,
    })
    await expect(
      chatCompletionStream(
        'user-1',
        'openai-compatible:node-1::gpt-4.1-mini',
        [{ role: 'user', content: 'hello' }],
        { temperature: 0.2 },
        undefined,
      ),
    ).rejects.toThrow('MODEL_LLM_PROTOCOL_REQUIRED')
    // Neither executor may have been invoked.
    expect(runOpenAICompatChatCompletionMock).not.toHaveBeenCalled()
    expect(runOpenAICompatResponsesCompletionMock).not.toHaveBeenCalled()
  })
})
@@ -0,0 +1,50 @@
import type OpenAI from 'openai'
import { describe, expect, it } from 'vitest'
import { getCompletionParts } from '@/lib/llm/completion-parts'
// Builds the smallest chat.completion envelope the parser consumes, with a
// single assistant choice carrying the given content.
function buildCompletion(content: string): OpenAI.Chat.Completions.ChatCompletion {
  const completion = {
    id: 'chatcmpl_test',
    object: 'chat.completion',
    created: 1,
    model: 'minimax-m2.5',
    choices: [
      {
        index: 0,
        message: {
          role: 'assistant',
          content,
        },
        finish_reason: 'stop',
      },
    ],
  }
  return completion as OpenAI.Chat.Completions.ChatCompletion
}
describe('llm completion parts think-tag parsing', () => {
  it('splits think tag content into reasoning and clean text', () => {
    // Template literal whitespace is significant: the expected `text` below
    // must match the post-tag content exactly.
    const completion = buildCompletion(`<think>
让我分析这段文本,筛选出需要制作画面的场景。
</think>
{
  "locations": []
}`)
    const parts = getCompletionParts(completion)
    expect(parts.reasoning).toContain('让我分析这段文本')
    expect(parts.text).toBe(`{
  "locations": []
}`)
  })
  it('keeps plain content untouched when no think tag exists', () => {
    const completion = buildCompletion('{ "locations": [] }')
    const parts = getCompletionParts(completion)
    expect(parts.reasoning).toBe('')
    expect(parts.text).toBe('{ "locations": [] }')
  })
})
@@ -0,0 +1,41 @@
import { describe, expect, it } from 'vitest'
import {
isLikelyOpenAIReasoningModel,
shouldUseOpenAIReasoningProviderOptions,
} from '@/lib/llm/reasoning-capability'
// Covers reasoning-model detection plus the provider/apiMode gating that
// decides whether OpenAI reasoning provider options should be attached.
describe('llm/reasoning-capability', () => {
  it('identifies likely OpenAI reasoning model ids', () => {
    expect(isLikelyOpenAIReasoningModel('o3-mini')).toBe(true)
    expect(isLikelyOpenAIReasoningModel('gpt-5.2')).toBe(true)
    expect(isLikelyOpenAIReasoningModel('claude-sonnet-4-6')).toBe(false)
  })
  it('enables reasoning provider options for native openai provider', () => {
    const nativeEnabled = shouldUseOpenAIReasoningProviderOptions({
      providerKey: 'openai',
      modelId: 'gpt-5.2',
    })
    expect(nativeEnabled).toBe(true)
  })
  it('enables reasoning provider options for openai-compatible only when apiMode is openai-official', () => {
    const officialMode = shouldUseOpenAIReasoningProviderOptions({
      providerKey: 'openai-compatible',
      providerApiMode: 'openai-official',
      modelId: 'gpt-5.2',
    })
    const missingMode = shouldUseOpenAIReasoningProviderOptions({
      providerKey: 'openai-compatible',
      modelId: 'gpt-5.2',
    })
    expect(officialMode).toBe(true)
    expect(missingMode).toBe(false)
  })
  it('disables reasoning provider options for non-openai models even on openai-compatible gateways', () => {
    const nonOpenAiModel = shouldUseOpenAIReasoningProviderOptions({
      providerKey: 'openai-compatible',
      providerApiMode: 'openai-official',
      modelId: 'claude-sonnet-4-6',
    })
    expect(nonOpenAiModel).toBe(false)
  })
})
@@ -0,0 +1,14 @@
import { describe, expect, it } from 'vitest'
import { formatLocationAvailableSlotsText } from '@/lib/location-available-slots'
// Locale regression: the English formatter must not leak the Chinese header.
describe('location available slots', () => {
  it('formats english slot headers without leaking chinese labels', () => {
    const slots = ['left side near the wall']
    const text = formatLocationAvailableSlotsText(slots, 'en')
    expect(text).toBe('Available character slots:\n- left side near the wall')
    expect(text).not.toContain('可站位置:')
  })
})

Some files were not shown because too many files have changed in this diff Show More