Fixes #16 - mega model running out of memory
Prior to this fix, the mega model would (more often than not) fail when run in succession. Clearing the CUDA cache appears to fix the issue.
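A quick way to confirm that cached allocator blocks, rather than live tensors, are what fills the GPU between runs is to compare allocated vs. reserved memory around an `empty_cache()` call. A minimal diagnostic sketch (not part of this commit):

```python
import torch

def report_cuda_memory(tag: str) -> None:
    # memory_allocated(): bytes held by live tensors.
    # memory_reserved(): bytes the caching allocator keeps from the driver,
    # including blocks already freed by Python but still cached.
    if not torch.cuda.is_available():
        print(f"{tag}: CUDA not available")
        return
    allocated = torch.cuda.memory_allocated() / 2**20
    reserved = torch.cuda.memory_reserved() / 2**20
    print(f"{tag}: allocated={allocated:.1f} MiB, reserved={reserved:.1f} MiB")

report_cuda_memory("before empty_cache")
torch.cuda.empty_cache()  # release cached, unused blocks back to the driver
report_cuda_memory("after empty_cache")
```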
parent 6b433d5ba6
commit 7d549505da
@@ -142,6 +142,7 @@ class MinDalle:
         params = torch.load(self.detoker_params_path)
         self.detokenizer.load_state_dict(params)
         del params
+        torch.cuda.empty_cache()
         if torch.cuda.is_available(): self.detokenizer = self.detokenizer.cuda()
@@ -175,6 +176,7 @@ class MinDalle:
             encoder_state
         )
         if not self.is_reusable: del self.decoder
+        torch.cuda.empty_cache()
         return image_tokens
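For context, a minimal sketch of the pattern the diff applies when running the model repeatedly: drop references to the large objects once they are consumed, then call `torch.cuda.empty_cache()` so the next run starts from a clean allocator state. `BigBlock` and `run_once` are hypothetical stand-ins, not min-dalle API:

```python
import torch

class BigBlock(torch.nn.Module):
    """Hypothetical heavy module standing in for the mega model's decoder."""
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(4096, 4096)

    def forward(self, x):
        return self.layer(x)

def run_once() -> torch.Tensor:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = BigBlock().to(device)
    x = torch.randn(64, 4096, device=device)
    with torch.no_grad():
        out = model(x)
    # Mirror the commit: delete the big objects, then release cached CUDA
    # blocks so successive runs do not accumulate reserved memory.
    del model, x
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    return out.cpu()

# Run in succession, as in the original bug report.
for _ in range(3):
    result = run_once()
```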