微立顶科技

新闻资讯

创新 服务 价值

  Qwen 最新开源全链路模型:ASR-LLM-知识库-TTS

发布日期:2026/3/30 8:54:59      浏览量:

Qwen 最新开源全链路模型:ASR-LLM-知识库-TTS



一、最新 Qwen 模型选型(全链路、全开源、本地 GPU)

1. ASR 语音识别(最新:Qwen3-ASR)

  • 推荐:Qwen3-ASR-1.7B(高精度、多语言、抗噪)
  • 备选:Qwen3-ASR-0.6B(轻量、低延迟、高并发)
  • 下载地址:modelscope://qwen/Qwen3-ASR-1.7B

2. LLM 对话核心(最新:Qwen3.5 系列)

  • 服务器推荐:Qwen3.5-9B-Chat(性能强、显存友好)
  • 轻量版:Qwen3.5-4B-Chat(4B,适合 8G 显存)
  • 下载地址:modelscope://qwen/Qwen3.5-9B-Chat

3. Embedding 知识库(最新:Qwen-Embedding-3)

  • 推荐:Qwen-Embedding-3-Large(最新、向量质量最高)
  • 下载地址:modelscope://qwen/Qwen-Embedding-3-Large

4. TTS 语音合成(最新:Qwen3-TTS)

  • 推荐:Qwen3-TTS-1.5B(高保真、流式、支持克隆)
  • 下载地址:modelscope://qwen/Qwen3-TTS-1.5B
二、服务器环境(GPU 版,一键安装)


# Create and activate the conda environment
conda create -n qwen3.5 python=3.10
conda activate qwen3.5

# PyTorch built for CUDA 12.1
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121

# Dependencies (transformers pinned; the rest resolve to latest)
pip install transformers==4.43.0 modelscope accelerate bitsandbytes \
faiss-gpu soundfile librosa uvicorn fastapi pyaudio


三、模型一键下载(本地 GPU 服务器)

from modelscope import snapshot_download

# (repo id, local target directory) for every stage of the pipeline:
# ASR, LLM, embedding, TTS — downloaded in that order.
_MODEL_TABLE = [
    ("qwen/Qwen3-ASR-1.7B", "./models/qwen3_asr"),
    ("qwen/Qwen3.5-9B-Chat", "./models/qwen3.5_9b_chat"),
    ("qwen/Qwen-Embedding-3-Large", "./models/qwen_emb3"),
    ("qwen/Qwen3-TTS-1.5B", "./models/qwen3_tts"),
]

for repo_id, target_dir in _MODEL_TABLE:
    snapshot_download(repo_id, local_dir=target_dir)

四、全链路代码(Qwen3.5 + Qwen3 语音,GPU 本地)

import torch
import faiss
import numpy as np
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModel
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

# Path configuration: local directories the models were downloaded into
# (see the snapshot_download section) plus the device everything runs on.
ASR_PATH = "./models/qwen3_asr"
LLM_PATH = "./models/qwen3.5_9b_chat"
EMB_PATH = "./models/qwen_emb3"
TTS_PATH = "./models/qwen3_tts"
DEVICE = "cuda"  # assumes a CUDA GPU is present — no CPU fallback here

# ---------------------- 1. ASR ----------------------
_asr_pipe = None  # built lazily on first call, then reused


def qwen3_asr(audio_file):
    """Transcribe *audio_file* to text with the local Qwen3-ASR model.

    Fix: the original rebuilt the modelscope pipeline (a full model load)
    on every call; it is now constructed once and cached at module level.

    :param audio_file: path to the input audio file
    :return: recognized text (str)
    """
    global _asr_pipe
    if _asr_pipe is None:
        _asr_pipe = pipeline(Tasks.automatic_speech_recognition, model=ASR_PATH, device=DEVICE)
    print("ASR: Qwen3-ASR-1.7B 识别中...")
    text = _asr_pipe(audio_file)["text"]
    print(f"ASR结果: {text}")
    return text

# ---------------------- 2. Knowledge-base retrieval ----------------------
# Embedding model/tokenizer are loaded once at import time; the FAISS index
# must have been built beforehand by the knowledge-base build script
# (faiss.read_index raises if "knowledge.faiss" is missing).
emb_tokenizer = AutoTokenizer.from_pretrained(EMB_PATH)
emb_model = AutoModel.from_pretrained(EMB_PATH).to(DEVICE).eval()
index = faiss.read_index("knowledge.faiss")

def get_embedding(text):
    """Embed *text* and return the [CLS]-position vector as a numpy array."""
    encoded = emb_tokenizer(text, return_tensors="pt", truncation=True).to(DEVICE)
    with torch.no_grad():
        hidden = emb_model(**encoded).last_hidden_state
    cls_vector = hidden[:, 0, :]
    return cls_vector.cpu().numpy()

def search_knowledge(query):
    """Return the top-3 knowledge chunks most similar to *query*, joined by newlines.

    Fixes vs. the original:
    - FAISS pads missing results with index -1; the old ``i < len(chunks)``
      guard let -1 through, silently returning the LAST chunk. Guard the
      lower bound too.
    - ``readlines()`` keeps trailing newlines (the chunk file is written
      with ``"\\n".join``), producing doubled line breaks in the joined
      context; ``splitlines()`` strips them.

    :param query: user question text
    :return: retrieved context (str); empty string if nothing matches
    """
    print("知识库: Qwen-Embedding-3 检索中...")
    emb = get_embedding(query)
    faiss.normalize_L2(emb)
    _, idx = index.search(emb, 3)
    with open("knowledge_chunks.txt", encoding="utf-8") as f:
        chunks = f.read().splitlines()
    context = "\n".join(chunks[i] for i in idx[0] if 0 <= i < len(chunks))
    return context

# ---------------------- 3. LLM ----------------------
llm_tokenizer = AutoTokenizer.from_pretrained(LLM_PATH)
# device_map="auto" lets accelerate place/shard the model across available
# GPUs. trust_remote_code executes the repo's custom modelling code — only
# acceptable because the model source is trusted here.
llm_model = AutoModelForCausalLM.from_pretrained(
    LLM_PATH,
    device_map="auto",
    trust_remote_code=True
).eval()

def qwen3_5_llm(query, context):
    """Generate an answer to *query* grounded in the retrieved *context*.

    NOTE(review): renamed from ``qwen3.5_llm`` — a dot is not legal in a
    Python identifier, so the original file could not even be parsed. The
    call site in run_digital_human is renamed to match.

    :param query: user question text
    :param context: retrieved knowledge-base text
    :return: generated answer (str)
    """
    print("LLM: Qwen3.5-9B 生成中...")
    prompt = f"参考资料:{context}\n用户问题:{query}\n请直接回答:"
    messages = [{"role": "user", "content": prompt}]
    inputs = llm_tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True).to(DEVICE)

    with torch.no_grad():
        outputs = llm_model.generate(
            inputs,
            max_new_tokens=1024,
            temperature=0.6,
            top_p=0.7,
            do_sample=True
        )
    # Decode only the newly generated tokens, skipping the prompt prefix.
    answer = llm_tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)
    print(f"LLM回答: {answer}")
    return answer

# ---------------------- 4. TTS ----------------------
_tts_pipe = None  # built lazily on first call, then reused


def qwen3_tts(text, out="result.wav"):
    """Synthesize *text* into a 16 kHz wav file at *out*; return the path.

    Fix: the original rebuilt the TTS pipeline (a full model load) on
    every call; it is now constructed once and cached at module level.
    """
    global _tts_pipe
    if _tts_pipe is None:
        _tts_pipe = pipeline(Tasks.text_to_speech, model=TTS_PATH, device=DEVICE)
    print("TTS: Qwen3-TTS-1.5B 合成中...")
    wav = _tts_pipe(text)["output_wav"]
    import soundfile as sf
    sf.write(out, wav, 16000)
    print(f"TTS完成: {out}")
    return out

# ---------------------- Main pipeline ----------------------
def run_digital_human(audio_file):
    """Full loop: ASR -> knowledge retrieval -> LLM -> TTS.

    :param audio_file: path to the user's input audio
    :return: (answer_text, wav_path) tuple
    """
    user_text = qwen3_asr(audio_file)
    context = search_knowledge(user_text)
    answer = qwen3_5_llm(user_text, context)
    voice = qwen3_tts(answer)
    return answer, voice

if __name__ == "__main__":
    run_digital_human("input.wav")

五、知识库构建(Qwen-Embedding-3)

from modelscope import AutoTokenizer, AutoModel
import faiss
import torch

# Load the embedding model onto the GPU once; eval() disables dropout etc.
model = AutoModel.from_pretrained("./models/qwen_emb3").cuda().eval()
tokenizer = AutoTokenizer.from_pretrained("./models/qwen_emb3")

def encode(texts):
    """Embed a batch of strings; returns a (batch, dim) numpy array
    using the [CLS]-position hidden state as the sentence vector."""
    inputs = tokenizer(texts, padding=True, truncation=True, return_tensors="pt").to("cuda")
    with torch.no_grad():
        emb = model(**inputs).last_hidden_state[:,0,:]
    return emb.cpu().numpy()

# Read the knowledge base: one chunk per non-empty line.
with open("knowledge.txt", encoding="utf-8") as f:
    chunks = [line.strip() for line in f if line.strip()]

# Build the vector index. Inner product over L2-normalized vectors is
# cosine similarity; note vectors must be normalized BEFORE index.add.
embeddings = encode(chunks)
index = faiss.IndexFlatIP(embeddings.shape[1])
faiss.normalize_L2(embeddings)
index.add(embeddings)
faiss.write_index(index, "knowledge.faiss")

# Persist the text chunks in index order so retrieval can map ids -> text.
with open("knowledge_chunks.txt", "w", encoding="utf-8") as f:
    f.write("\n".join(chunks))

print("✅ 知识库构建完成")

六、服务化部署(FastAPI,供数字人调用)

from fastapi import FastAPI, UploadFile
app = FastAPI()

@app.post("/digital_human")
async def run(file: UploadFile):
    """Accept an uploaded audio file, run the full pipeline, and return
    the answer text plus the path of the synthesized reply audio.

    Fix: the original wrote every upload to the fixed name "temp.wav",
    so concurrent requests clobbered each other's input; each request now
    gets its own unique temporary file.
    """
    import tempfile
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
        tmp.write(await file.read())
    ans, voice = run_digital_human(tmp.name)
    return {"answer": ans, "audio": voice}


启动:

uvicorn main:app --host 0.0.0.0 --port 10888



  业务实施流程

需求调研 →

团队组建和动员 →

数据初始化 →

调试完善 →

解决方案和选型 →

硬件网络部署 →

系统部署试运行 →

系统正式上线 →

合作协议

系统开发/整合

制作文档和员工培训

售后服务

马上咨询: 如果您有业务方面的问题或者需求,欢迎您咨询!我们带来的不仅仅是技术,还有行业经验积累。
QQ: 39764417/308460098     Phone: 13 9800 1 9844 / 135 6887 9550     联系人:石先生/雷先生