我已为你在画布里创建了一个 可预览的 React 单文件项目(App.jsx),模仿 Deepseek 官网风格的 AI 聊天前端。

// Deepseek-style AI Chat — Single-file React component (App.jsx)
// Usage:
// 1. Create a React app (Vite + React + Tailwind). Install dependencies: react, react-dom, tailwindcss, framer-motion (optional).
// 2. Save this file as src/App.jsx and ensure Tailwind is configured.
// 3. The component expects a backend proxy endpoint at /api/generate that forwards requests to your LLM provider
// (Ollama / OpenAI / Tars). Example proxy payload: { model, messages } and response: { id, text } or streaming SSE.
// 4. Quick proxy examples:
// – Ollama local: proxy POST /api/generate -> POST http://localhost:11434/completions (or your ollama endpoint) mapping body.
// – OpenAI: proxy POST /api/generate -> call OpenAI Chat Completions and return simplified JSON.
// 5. This file focuses on frontend UI & UX: chat list, streaming fallback, prompt templates, model selector, responsive layout.

import React, { useEffect, useRef, useState } from 'react';
import { motion, AnimatePresence } from 'framer-motion';

// Model choices shown in the sidebar selector. `id` is sent verbatim to the
// backend proxy as the `model` field; `name` is the human-readable label.
const DEFAULT_MODELS = [
  { id: 'deepseek-r1:7b', name: 'Deepseek R1 (7B)' },
  { id: 'deepseek-r1:1.5b', name: 'Deepseek R1 (1.5B)' },
  { id: 'qwen2.5:7b', name: 'Qwen-2.5 (7B)' },
  { id: 'local-ollama', name: 'Local Ollama' },
];

// One-click prompt templates. `{{input}}` is the placeholder substituted with
// the current textarea content when a template is applied (see applyTemplate).
const PROMPT_TEMPLATES = [
  { id: 'summarize', title: 'Summarize', template: '请帮我把下面内容总结成 5 条要点:\n\n{{input}}' },
  { id: 'rewrite', title: 'Rewrite (Polish)', template: '请把下面文字改写为更流畅、更专业的中文:\n\n{{input}}' },
  { id: 'persona', title: 'Persona — Researcher', template: '你是一个严谨的历史研究员。基于下面的片段给出 200 字内的专业评论:\n\n{{input}}' },
];

// Hook: keep a scrollable container pinned to its bottom edge.
// Re-runs every time `dep` changes (here: the messages array identity),
// so newly appended chat bubbles are always scrolled into view.
function useAutoScroll(dep) {
  const containerRef = useRef();
  useEffect(() => {
    const el = containerRef.current;
    if (el) {
      el.scrollTop = el.scrollHeight;
    }
  }, [dep]);
  return containerRef;
}

export default function App() {
const [models] = useState(DEFAULT_MODELS);
const [selectedModel, setSelectedModel] = useState(models[0].id);
const [messages, setMessages] = useState(() => [
{ id: ‘s1’, role: ‘assistant’, text: ‘欢迎!我可以帮你模拟 Deepseek 风格的问答。输入问题开始吧。’ },
]);
const [input, setInput] = useState(”);
const [loading, setLoading] = useState(false);
const [isSidebarOpen, setSidebarOpen] = useState(true);
const [temperature, setTemperature] = useState(0.2);
const [streaming, setStreaming] = useState(true);
const [systemPrompt, setSystemPrompt] = useState(‘你是一名助理,回答尽量简洁并给出参考要点。’);
const scrollRef = useAutoScroll(messages);

useEffect(() => {
// focus input on mount
const el = document.getElementById(‘chat-input’);
if (el) el.focus();
}, []);

function appendMessage(msg) {
setMessages((m) => […m, msg]);
}

async function handleSendMessage(templateApplied = null) {
const textToSend = templateApplied
? templateApplied.replace(/{{input}}/g, input)
: input;

if (!textToSend.trim()) return;

const userId = `u_${Date.now()}`;
const userMsg = { id: userId, role: 'user', text: textToSend };
appendMessage(userMsg);
setInput('');

const assistantId = `a_${Date.now()}`;
const assistantMsg = { id: assistantId, role: 'assistant', text: '', streaming: streaming };
appendMessage(assistantMsg);
setLoading(true);

try {
  // Prepare payload for backend proxy
  const payload = {
    model: selectedModel,
    temperature,
    messages: [
      { role: 'system', content: systemPrompt },
      ...messages.filter((m) => m.role !== 'assistant' || m.text).map((m) => ({ role: m.role, content: m.text })),
      { role: 'user', content: textToSend },
    ],
    stream: streaming,
  };

  // POST to our proxy endpoint. Replace /api/generate with your proxy.
  const res = await fetch('/api/generate', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
  });

  if (!res.ok) {
    const txt = await res.text();
    throw new Error(txt || '模型调用失败');
  }

  if (streaming && res.body) {
    // Stream tokens
    const reader = res.body.getReader();
    const decoder = new TextDecoder();
    let done = false;
    let collected = '';
    while (!done) {
      const { value, done: d } = await reader.read();
      done = d;
      if (value) {
        const chunk = decoder.decode(value, { stream: true });
        collected += chunk;
        // Assume server sends raw text chunks (or JSON lines). Try to parse gracefully.
        // We'll append chunk to last assistant message.
        setMessages((cur) => {
          const copy = [...cur];
          const idx = copy.findIndex((x) => x.id === assistantId);
          if (idx >= 0) {
            copy[idx] = { ...copy[idx], text: (copy[idx].text || '') + chunk };
          }
          return copy;
        });
      }
    }
  } else {
    // Non-streaming JSON response
    const data = await res.json();
    const answer = data.text ?? data.answer ?? (Array.isArray(data.choices) ? data.choices[0].message.content : '');
    setMessages((cur) => cur.map((m) => (m.id === assistantId ? { ...m, text: answer } : m)));
  }

} catch (err) {
  console.error(err);
  setMessages((cur) => cur.map((m) => (m.id === assistantId ? { ...m, text: '发生错误:' + err.message } : m)));
} finally {
  setLoading(false);
}

}

function applyTemplate(tpl) {
const replaced = tpl.template.replace(‘{{input}}’, input || ”);
handleSendMessage(replaced);
}

function clearConversation() {
setMessages([{ id: ‘s1’, role: ‘assistant’, text: ‘欢迎!我可以帮你模拟 Deepseek 风格的问答。输入问题开始吧。’ }]);
}

return (

{/* Sidebar */} col-span-1 p-3 rounded-xl bg-white/80 dark:bg-gray-800/60 shadow-lg ${isSidebarOpen ? ” : ‘hidden md:block’}}>

Deepseek 风格

setSidebarOpen((s) => !s)}> {isSidebarOpen ? ‘收起’ : ‘展开’}

      <div className="mb-4">
        <label className="block text-xs">模型</label>
        <select
          value={selectedModel}
          onChange={(e) => setSelectedModel(e.target.value)}
          className="w-full mt-1 p-2 rounded border bg-transparent text-sm">
          {models.map((m) => (
            <option key={m.id} value={m.id}>{m.name}</option>
          ))}
        </select>
      </div>

      <div className="mb-4">
        <label className="block text-xs">温度 {temperature}</label>
        <input type="range" min="0" max="1" step="0.05" value={temperature}
          onChange={(e) => setTemperature(Number(e.target.value))} className="w-full" />
      </div>

      <div className="mb-4">
        <label className="block text-xs">Prompt(系统)</label>
        <textarea value={systemPrompt} onChange={(e) => setSystemPrompt(e.target.value)} rows={3}
          className="w-full mt-1 p-2 rounded border bg-transparent text-sm" />
      </div>

      <div className="mb-4">
        <h3 className="text-sm font-medium mb-2">模板</h3>
        <div className="space-y-2">
          {PROMPT_TEMPLATES.map((t) => (
            <button key={t.id} onClick={() => applyTemplate(t)}
              className="w-full text-left p-2 rounded hover:bg-gray-100 dark:hover:bg-gray-700">
              <div className="text-sm font-semibold">{t.title}</div>
              <div className="text-xs text-gray-500 dark:text-gray-400 truncate">{t.template.replace('{{input}}', '...')}</div>
            </button>
          ))}
        </div>
      </div>

      <div className="flex gap-2">
        <button onClick={clearConversation} className="flex-1 py-2 rounded bg-red-50 text-red-700 text-sm">清除</button>
        <button onClick={() => window.location.reload()} className="flex-1 py-2 rounded bg-blue-50 text-blue-700 text-sm">重载</button>
      </div>
    </aside>

    {/* Chat area */}
    <main className="col-span-3 md:col-span-3 bg-white/60 dark:bg-gray-800/60 rounded-xl shadow-lg flex flex-col">
      <header className="p-4 border-b border-gray-200 dark:border-gray-700 flex items-center justify-between">
        <div>
          <h1 className="text-xl font-bold">Deepseek 风格 AI 聊天(前端学习)</h1>
          <p className="text-sm text-gray-500 dark:text-gray-400">模拟官网风格的交互体验 — 可接 Ollama / OpenAI / 自建代理</p>
        </div>
        <div className="flex items-center gap-3">
          <label className="text-sm">流式:</label>
          <input type="checkbox" checked={streaming} onChange={(e) => setStreaming(e.target.checked)} />
        </div>
      </header>

      <section className="flex-1 overflow-hidden flex flex-col">
        <div ref={scrollRef} className="p-4 overflow-y-auto flex-1 space-y-4">
          <AnimatePresence initial={false} mode="wait">
            {messages.map((m) => (
              <motion.div key={m.id} initial={{ opacity: 0, y: 6 }} animate={{ opacity: 1, y: 0 }} exit={{ opacity: 0 }}>
                <MessageBubble message={m} />
              </motion.div>
            ))}
          </AnimatePresence>
        </div>

        <div className="p-4 border-t border-gray-200 dark:border-gray-700 bg-gradient-to-t from-white/50 dark:from-gray-800/50">
          <div className="flex items-start gap-3">
            <textarea id="chat-input" className="flex-1 p-3 rounded border bg-transparent resize-none" rows={2}
              value={input} onChange={(e) => setInput(e.target.value)} placeholder="输入问题,按 Ctrl+Enter 发送。" onKeyDown={(e) => {
                if ((e.ctrlKey || e.metaKey) && e.key === 'Enter') handleSendMessage();
              }} />

            <div className="w-40 flex flex-col gap-2">
              <button onClick={() => handleSendMessage(null)} className="py-2 px-3 rounded bg-blue-600 text-white">发送</button>
              <button onClick={() => setInput('')} className="py-2 px-3 rounded border">清空</button>
            </div>
          </div>

          <div className="mt-2 flex items-center justify-between text-xs text-gray-500">
            <div>模型: <b>{selectedModel}</b></div>
            <div>{loading ? '模型处理中...' : '就绪'}</div>
          </div>
        </div>
      </section>
    </main>
  </div>
</div>

);
}

function MessageBubble({ message }) {
const isUser = message.role === ‘user’;
return (

flex ${isUser ? ‘justify-end’ : ‘justify-start’}}>

${isUser ? ‘bg-blue-600 text-white’ : ‘bg-gray-100 dark:bg-gray-700 text-gray-900 dark:text-gray-100’} max-w-[78%] p-3 rounded-lg}>

{message.text}

{isUser ? ‘用户’ : ‘助手’}
);
}
文件包含:

  • 完整的 UI(侧边栏、模型选择、模板、聊天窗口、输入框)
  • 流式/非流式两种模型调用示例前端逻辑(需要后端代理 /api/generate)
  • Tailwind + Framer Motion 的样式与小动画
  • 发送、清除、模板应用、系统 prompt、温度调节等功能