faster decoder self attention

This commit is contained in:
Brett Kuprel 2022-07-04 08:05:55 -04:00
parent a79d30f718
commit 377d15cb16
4 changed files with 21 additions and 23 deletions

min_dalle.ipynb vendored

@@ -178,8 +178,8 @@
     "%%time\n",
     "\n",
     "text = \"Dali painting of WALL·E\" #@param {type:\"string\"}\n",
-    "seed = 0 #@param {type:\"integer\"}\n",
     "grid_size = 2 #@param {type:\"integer\"}\n",
+    "seed = -1 #@param {type:\"integer\"}\n",
     "\n",
     "display(model.generate_image(text, seed, grid_size))"
    ]


@@ -165,6 +165,7 @@ class MinDalle:
         if self.is_verbose: print("encoding text tokens")
         encoder_state = self.encoder.forward(text_tokens)
         if not self.is_reusable: del self.encoder
+        if torch.cuda.is_available(): torch.cuda.empty_cache()

         if not self.is_reusable: self.init_decoder()
         if self.is_verbose: print("sampling image tokens")
@@ -175,7 +176,6 @@ class MinDalle:
             encoder_state
         )
         if not self.is_reusable: del self.decoder
-        if torch.cuda.is_available(): torch.cuda.empty_cache()
         return image_tokens
@@ -187,6 +187,7 @@ class MinDalle:
     ) -> Image.Image:
         image_count = grid_size ** 2
         image_tokens = self.generate_image_tokens(text, seed, image_count)
+        if torch.cuda.is_available(): torch.cuda.empty_cache()
         if not self.is_reusable: self.init_detokenizer()
         if self.is_verbose: print("detokenizing image")
         images = self.detokenizer.forward(image_tokens).to(torch.uint8)
@@ -194,4 +195,5 @@ class MinDalle:
         images = images.reshape([grid_size] * 2 + list(images.shape[1:]))
         image = images.flatten(1, 2).transpose(0, 1).flatten(1, 2)
         image = Image.fromarray(image.to('cpu').detach().numpy())
+        if torch.cuda.is_available(): torch.cuda.empty_cache()
         return image
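
The relocated torch.cuda.empty_cache() calls above release the CUDA caching allocator's unused blocks as soon as each stage (encoder, decoder, detokenizer) is deleted, rather than only at the end of generation, which lowers peak GPU memory when is_reusable is False. A minimal, self-contained sketch of the same pattern; stage_a and stage_b are generic stand-ins, not the repo's actual modules:

import torch
import torch.nn as nn

def run_in_stages(stage_a: nn.Module, stage_b: nn.Module, x: torch.Tensor) -> torch.Tensor:
    # Run the first stage, drop it, and return its cached CUDA blocks to the
    # driver before the second stage allocates its own activations.
    hidden = stage_a(x)
    del stage_a
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    return stage_b(hidden)

Note that empty_cache() only frees blocks no longer referenced; tensors still in use (here hidden) stay allocated.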


@@ -20,25 +20,28 @@ class DecoderCrossAttention(AttentionBase):

 class DecoderSelfAttention(AttentionBase):
+    def __init__(self, head_count: int, embed_count: int):
+        super().__init__(head_count, embed_count)
+        token_indices = torch.arange(256)
+        if torch.cuda.is_available(): token_indices = token_indices.cuda()
+        self.token_indices = token_indices
+
     def forward(
         self,
         decoder_state: FloatTensor,
         attention_state: FloatTensor,
-        attention_mask: BoolTensor,
-        token_mask: BoolTensor
+        token_index: LongTensor
     ) -> Tuple[FloatTensor, FloatTensor]:
         keys = self.k_proj.forward(decoder_state)
         values = self.v_proj.forward(decoder_state)
         queries = self.q_proj.forward(decoder_state)
-        attention_state = torch.where(
-            token_mask[None, :, None],
-            torch.cat([keys, values]),
-            attention_state
-        )
+        attn_mask = self.token_indices < token_index + 1
+        attn_mask = attn_mask[None][[0] * decoder_state.shape[0]]
+        attention_state[:, token_index] = torch.cat([keys, values])
         batch_count = decoder_state.shape[0]
         keys = attention_state[:batch_count]
         values = attention_state[batch_count:]
-        decoder_state = super().forward(keys, values, queries, attention_mask)
+        decoder_state = super().forward(keys, values, queries, attn_mask)
         return decoder_state, attention_state
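
The rewritten forward replaces the masked torch.where scatter over the whole cache with a direct index assignment at the current position, and derives the causal mask from the token_indices buffer built in __init__. A standalone sketch of that cache update, with illustrative shapes (not taken from the repo) and torch.Tensor.expand in place of the repeated-index trick used in the diff:

import torch

batch_count, token_count, embed_count = 2, 256, 8
token_indices = torch.arange(token_count)

# Keys and values for all positions are stacked along dim 0: [2 * batch, tokens, embed].
attention_state = torch.zeros(2 * batch_count, token_count, embed_count)

token_index = torch.tensor([5])                      # current decoding step
keys = torch.randn(batch_count, 1, embed_count)      # this step's projected keys
values = torch.randn(batch_count, 1, embed_count)    # this step's projected values

# Write the new keys/values straight into the cache at the current position...
attention_state[:, token_index] = torch.cat([keys, values])

# ...and attend only to positions generated so far (one causal row per batch item).
attn_mask = (token_indices < token_index + 1)[None].expand(batch_count, -1)

Compared with a torch.where over the full 256-position cache, this touches only the single position being decoded each step.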
@@ -60,9 +63,6 @@ class DecoderLayer(nn.Module):
         self.encoder_attn_layer_norm = nn.LayerNorm(embed_count)
         self.glu = GLU(embed_count, glu_embed_count)
-        self.token_indices = torch.arange(self.image_token_count)
-        if torch.cuda.is_available():
-            self.token_indices = self.token_indices.cuda()

     def forward(
         self,
@@ -75,14 +75,10 @@ class DecoderLayer(nn.Module):
         # Self Attention
         residual = decoder_state
         decoder_state = self.pre_self_attn_layer_norm.forward(decoder_state)
-        self_attn_mask = self.token_indices < token_index + 1
-        self_attn_mask = self_attn_mask[None][[0] * decoder_state.shape[0]]
-        token_mask = self.token_indices == token_index
         decoder_state, attention_state = self.self_attn.forward(
             decoder_state,
             attention_state,
-            self_attn_mask,
-            token_mask
+            token_index
         )
         decoder_state = self.self_attn_layer_norm.forward(decoder_state)
         decoder_state = residual + decoder_state
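
With the mask construction moved into DecoderSelfAttention, DecoderLayer only passes the current token_index down each step. A hedged sketch of an outer sampling loop that drives such an interface; decode_step and its signature are hypothetical stand-ins, not the repo's actual decoder call:

import torch

def sample_image_tokens(decode_step, image_token_count: int = 256) -> torch.Tensor:
    # decode_step(prev_token, token_index) -> next_token; it is expected to
    # update its key/value cache in place at token_index, as in the diff above.
    token = torch.zeros(1, dtype=torch.long)
    tokens = []
    for i in range(image_token_count):
        token_index = torch.tensor([i])
        token = decode_step(token, token_index)
        tokens.append(token)
    return torch.cat(tokens)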


@@ -13,16 +13,16 @@ class Predictor(BasePredictor):
             description='Text',
             default='Dali painting of WALL·E'
         ),
-        seed: int = Input(
-            description='Set the seed to a positive number for reproducible results',
-            default=-1
-        ),
         grid_size: int = Input(
             description='Size of the image grid',
             ge=1,
             le=4,
             default=4
-        )
+        ),
+        seed: int = Input(
+            description='Set the seed to a positive number for reproducible results',
+            default=-1
+        ),
     ) -> Path:
         image = self.model.generate_image(text, seed, grid_size=grid_size)
         out_path = Path(tempfile.mkdtemp()) / 'output.jpg'