Don't keep model in memory, breaks Stable Diffusion

Tanner Collin 2025-01-21 04:05:29 +00:00
parent dcd8af2895
commit 641eadb253


@@ -23,7 +23,7 @@ def controller_message(message):
     return False
 
 def llama(prompt):
-    data = dict(model='llama3.1', prompt=prompt, stream=False, keep_alive=-1)
+    data = dict(model='llama3.1', prompt=prompt, stream=False, keep_alive=25)  # don't keep in memory, interferes with Stable Diffusion
     try:
         r = requests.post(LLAMA_URL, json=data, timeout=20)
         r.raise_for_status()
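
For context: in Ollama's generate API, keep_alive sets how long the model stays loaded after a request. A negative value pins it in memory indefinitely, while a positive number is a duration in seconds, so moving from -1 to 25 lets the model unload shortly after each call and frees its VRAM for Stable Diffusion. Below is a minimal sketch of how the patched function might look in full; the LLAMA_URL value (a local Ollama endpoint) and the return-False fallback are assumptions, since the diff only shows part of the file.

import requests

LLAMA_URL = 'http://localhost:11434/api/generate'  # assumed local Ollama endpoint

def llama(prompt):
    # keep_alive=25 asks Ollama to unload the model ~25 seconds after this
    # request, instead of holding it in VRAM indefinitely (keep_alive=-1).
    data = dict(model='llama3.1', prompt=prompt, stream=False, keep_alive=25)
    try:
        r = requests.post(LLAMA_URL, json=data, timeout=20)
        r.raise_for_status()
        return r.json()['response']  # non-streaming replies carry the text in 'response'
    except requests.RequestException:
        return False  # assumed fallback, mirroring controller_message() above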