Fixes #16 - mega model running out of memory
Prior to this fix, the mega model would (more often than not) fail when run in succession. Clearing the CUDA cache seems to fix the issue.
This commit is contained in:
@@ -142,6 +142,7 @@ class MinDalle:
         params = torch.load(self.detoker_params_path)
         self.detokenizer.load_state_dict(params)
         del params
+        torch.cuda.empty_cache()
         if torch.cuda.is_available(): self.detokenizer = self.detokenizer.cuda()
||||
@@ -175,6 +176,7 @@ class MinDalle:
             encoder_state
         )
         if not self.is_reusable: del self.decoder
+        torch.cuda.empty_cache()
         return image_tokens
Reference in New Issue
Block a user