Fixes #16 - mega model running out of memory

Prior to this fix, the mega model would (more often than not) fail when run in succession.

Clearing the cache seems to fix the issue.
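For background, `del` only drops the Python reference; PyTorch's caching allocator keeps the freed blocks reserved on the GPU, and torch.cuda.empty_cache() returns them to the driver so the next large allocation can succeed. A minimal standalone sketch of that behavior (requires a CUDA device; illustrative only, not code from this repository):

import torch

# Allocate roughly 1 GiB on the GPU, then drop the Python reference.
x = torch.empty(1024, 1024, 256, device="cuda")
del x

print(torch.cuda.memory_allocated())  # 0 bytes in use by live tensors
print(torch.cuda.memory_reserved())   # ~1 GiB still held by the caching allocator

torch.cuda.empty_cache()              # hand the cached blocks back to the driver
print(torch.cuda.memory_reserved())   # now close to 0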
w4ffl35 2022-07-04 00:26:17 -06:00
parent 6b433d5ba6
commit 7d549505da


@@ -142,6 +142,7 @@ class MinDalle:
         params = torch.load(self.detoker_params_path)
         self.detokenizer.load_state_dict(params)
         del params
+        torch.cuda.empty_cache()
         if torch.cuda.is_available(): self.detokenizer = self.detokenizer.cuda()
@@ -175,6 +176,7 @@ class MinDalle:
             encoder_state
         )
         if not self.is_reusable: del self.decoder
+        torch.cuda.empty_cache()
         return image_tokens
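Rough usage sketch of the scenario the fix targets: generating with the mega model several times in a row, where each run deletes its decoder but, before this commit, left the freed blocks sitting in the allocator cache. The constructor and method names below are assumptions for illustration and may not match this repository's actual interface:

# Hypothetical API names; shown only to illustrate repeated generation.
model = MinDalle(is_mega=True, is_reusable=False)

for prompt in ["a cat", "a dog", "a horse"]:
    # Each call reloads and then deletes the decoder; the added
    # torch.cuda.empty_cache() calls keep cached blocks from piling up
    # across iterations and triggering CUDA out-of-memory errors.
    image_tokens = model.generate_image_tokens(prompt)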