Commit fab3229e authored by 于飞

Fix newline handling for the 非遗 (intangible cultural heritage) streamed responses

parent 614cde8a
@@ -544,10 +544,11 @@ async def stream_generator(chat, incremental: bool, model_name: str):
                 json_chunk = model_to_json(
                     chunk, exclude_unset=True, ensure_ascii=False
                 )
+                json_chunk = json_chunk.replace("\\n", "\n")
                 yield f"data: {json_chunk}\n\n"
             else:
                 # TODO generate an openai-compatible streaming responses
-                # msg = msg.replace("\n", "\\n")
+                msg = msg.replace("\n", "\\n")
                 yield f"data:{msg}\n\n"
             previous_response = msg
             await asyncio.sleep(0.02)
...
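For context, here is a minimal sketch (not the project's code; the helper name sse_event is illustrative) of why newlines in a streamed payload matter for Server-Sent Events framing: an SSE event is terminated by a blank line, so an unescaped "\n" inside the payload splits one message across data lines, while escaping it, as the non-JSON branch now does, keeps each message on a single data: line. The added json_chunk.replace("\\n", "\n") presumably goes the other way for the JSON branch, turning literal backslash-n sequences back into real newlines so the 非遗 text renders with line breaks.

def sse_event(payload: str, escape_newlines: bool) -> str:
    # Format one Server-Sent Events chunk. An SSE event ends at a blank line,
    # so a raw "\n" inside the payload would split the message across frames.
    if escape_newlines:
        payload = payload.replace("\n", "\\n")  # keep the whole payload on one data: line
    return f"data: {payload}\n\n"

if __name__ == "__main__":
    msg = "第一行\n第二行"  # a payload containing a raw newline
    print(repr(sse_event(msg, escape_newlines=False)))  # embedded newline breaks the SSE framing
    print(repr(sse_event(msg, escape_newlines=True)))   # payload stays on a single data: line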