simplified attention for torch model

This commit is contained in:
Brett Kuprel 2022-06-29 13:48:12 -04:00
parent 95afa18893
commit 661ec976ac
2 changed files with 14 additions and 21 deletions

View File

@@ -16,12 +16,6 @@ class DecoderCrossAttentionTorch(AttentionTorch):
         keys = self.k_proj.forward(encoder_state)
         values = self.v_proj.forward(encoder_state)
         queries = self.q_proj.forward(decoder_state)
-        query_shape = queries.shape[:2] + (self.head_count, -1)
-        key_value_shape = keys.shape[:2] + (self.head_count, -1)
-        keys = keys.reshape(key_value_shape)
-        values = values.reshape(key_value_shape)
-        queries = queries.reshape(query_shape)
-        queries /= queries.shape[-1] ** 0.5
         return super().forward(keys, values, queries, attention_mask)
@@ -34,16 +28,14 @@ class DecoderSelfAttentionTorch(AttentionTorch):
         token_mask: BoolTensor
     ) -> Tuple[FloatTensor, FloatTensor]:
         batch_count = decoder_state.shape[0]
-        shape = (batch_count, 1) + keys_values.shape[2:]
-        keys = self.k_proj.forward(decoder_state).view(shape)
-        values = self.v_proj.forward(decoder_state).view(shape)
+        keys = self.k_proj.forward(decoder_state)
+        values = self.v_proj.forward(decoder_state)
+        queries = self.q_proj.forward(decoder_state)
         keys_values = torch.where(
-            token_mask[None, :, None, None],
+            token_mask[None, :, None],
             torch.cat([keys, values]),
             keys_values
         )
-        queries = self.q_proj.forward(decoder_state).reshape(shape)
-        queries /= queries.shape[-1] ** 0.5
         keys, values = keys_values[:batch_count], keys_values[batch_count:]
         decoder_state = super().forward(keys, values, queries, attention_mask)
         return decoder_state, keys_values
@@ -150,8 +142,7 @@ class DalleBartDecoderTorch(nn.Module):
         self.keys_values_state_shape = (
             layer_count * 2 * batch_count,
             image_token_count,
-            attention_head_count,
-            embed_count // attention_head_count
+            embed_count
         )
         self.zero_prob = torch.zeros([1])
         self.token_indices = torch.arange(self.sample_token_count)
@@ -188,7 +179,6 @@ class DalleBartDecoderTorch(nn.Module):
                 token_index[:1]
             )
             keys_values.append(keys_values_layer)
-        keys_values = torch.cat(keys_values, dim=0)
         decoder_state = self.final_ln(decoder_state)
         logits = self.lm_head(decoder_state)
         a = self.condition_factor
@@ -200,7 +190,7 @@ class DalleBartDecoderTorch(nn.Module):
             self.zero_prob,
             torch.exp(logits - top_logits[0])
         )
-        return probs, keys_values
+        return probs, torch.cat(keys_values)
     def forward(
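
The decoder self-attention change above keeps keys and values in their flat (batch, token_count, embed) layout and writes them into the cache before any head splitting, which is why the token mask broadcast drops a dimension (token_mask[None, :, None] instead of token_mask[None, :, None, None]). Below is a minimal standalone sketch of that cache update; the sizes are illustrative and the projections are replaced with random tensors, so it only demonstrates the torch.where broadcasting, not the repo's exact decode step.

import torch

# Illustrative sizes only; the real values come from the model config.
batch_count, token_count, embed_count = 2, 6, 8

# Cache of past keys and values stacked along dim 0:
# rows [:batch_count] hold keys, rows [batch_count:] hold values.
keys_values = torch.zeros(2 * batch_count, token_count, embed_count)

# Stand-ins for the k/v projections of the single token decoded this step.
keys = torch.randn(batch_count, 1, embed_count)
values = torch.randn(batch_count, 1, embed_count)

# True only at the position currently being decoded.
token_index = 3
token_mask = torch.arange(token_count) == token_index

# Write the new keys/values into the cache at the active position only.
# The mask broadcasts as (1, token_count, 1) over the 3-D cache,
# matching token_mask[None, :, None] in the diff.
keys_values = torch.where(
    token_mask[None, :, None],
    torch.cat([keys, values]),
    keys_values
)

keys, values = keys_values[:batch_count], keys_values[batch_count:]
print(keys.shape, values.shape)  # torch.Size([2, 6, 8]) for both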

View File

@@ -44,6 +44,11 @@ class AttentionTorch(nn.Module):
         queries: FloatTensor,
         attention_mask: BoolTensor
     ) -> FloatTensor:
+        keys = keys.reshape(keys.shape[:2] + (self.head_count, -1))
+        values = values.reshape(values.shape[:2] + (self.head_count, -1))
+        queries = queries.reshape(queries.shape[:2] + (self.head_count, -1))
+        queries /= queries.shape[-1] ** 0.5
         attention_bias = torch.where(
             attention_mask,
             self.one * 0,
@@ -73,11 +78,9 @@ class EncoderSelfAttentionTorch(AttentionTorch):
         encoder_state: FloatTensor,
         attention_mask: BoolTensor
     ) -> FloatTensor:
-        shape_split = encoder_state.shape[:2] + (self.head_count, -1)
-        keys = self.k_proj.forward(encoder_state).reshape(shape_split)
-        values = self.v_proj.forward(encoder_state).reshape(shape_split)
-        queries = self.q_proj.forward(encoder_state).reshape(shape_split)
-        queries /= queries.shape[-1] ** 0.5
+        keys = self.k_proj.forward(encoder_state)
+        values = self.v_proj.forward(encoder_state)
+        queries = self.q_proj.forward(encoder_state)
         return super().forward(keys, values, queries, attention_mask)
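
For reference, here is a standalone sketch of the head-split-and-scale step that this commit consolidates into AttentionTorch.forward, so each subclass can hand over flat (batch, token_count, embed) projections. head_count and the tensor sizes below are made up for illustration, and the attention score computation that follows in the real forward is omitted.

import torch

# Illustrative sizes; in the model these come from the config.
batch_count, token_count, embed_count, head_count = 2, 6, 16, 4

# Stand-ins for the flat k/v/q projections passed in by the subclasses.
keys = torch.randn(batch_count, token_count, embed_count)
values = torch.randn(batch_count, token_count, embed_count)
queries = torch.randn(batch_count, token_count, embed_count)

# Split the embedding dimension into (head_count, head_dim),
# mirroring the lines added to AttentionTorch.forward above.
keys = keys.reshape(keys.shape[:2] + (head_count, -1))
values = values.reshape(values.shape[:2] + (head_count, -1))
queries = queries.reshape(queries.shape[:2] + (head_count, -1))

# Scale queries by 1 / sqrt(head_dim) before computing attention scores.
queries /= queries.shape[-1] ** 0.5

print(keys.shape)  # torch.Size([2, 6, 4, 4]): batch, tokens, heads, head_dim

This is also why keys_values_state_shape in the decoder drops the separate (attention_head_count, embed_count // attention_head_count) dimensions: the cache now stores flat embeddings, and the per-head split happens only inside the base forward.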