Refactor: Format Code with Ruff and Update Deprecated G2PW Link (#2255)
* ruff check --fix
* ruff format --line-length 120 --target-version py39
* Change the link for the G2PW model
* Update the PyTorch version and the Colab notebook
@@ -7,23 +7,22 @@ import torch.nn.functional as F
 def exists(val):
     return val is not None
 
 
 def default(v, d):
     return v if exists(v) else d
 
 
 class Attend(nn.Module):
-    def __init__(
-        self,
-        dropout = 0.,
-        flash = False,
-        scale = None
-    ):
+    def __init__(self, dropout=0.0, flash=False, scale=None):
         super().__init__()
         self.scale = scale
         self.dropout = dropout
         self.attn_dropout = nn.Dropout(dropout)
 
         self.flash = flash
-        assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
+        assert not (flash and version.parse(torch.__version__) < version.parse("2.0.0")), (
+            "in order to use flash attention, you must be using pytorch 2.0 or above"
+        )
 
     def flash_attn(self, q, k, v):
         # _, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
@@ -34,7 +33,7 @@ class Attend(nn.Module):
 
         # pytorch 2.0 flash attn: q, k, v, mask, dropout, softmax_scale
         # with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True):
-        return F.scaled_dot_product_attention(q, k, v,dropout_p = self.dropout if self.training else 0.)
+        return F.scaled_dot_product_attention(q, k, v, dropout_p=self.dropout if self.training else 0.0)
 
     def forward(self, q, k, v):
         """
@@ -54,7 +53,7 @@ class Attend(nn.Module):
 
         # similarity
 
-        sim = einsum(f"b h i d, b h j d -> b h i j", q, k) * scale
+        sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale
 
         # attention
 
@@ -63,6 +62,6 @@ class Attend(nn.Module):
 
         # aggregate values
 
-        out = einsum(f"b h i j, b h j d -> b h i d", attn, v)
+        out = einsum("b h i j, b h j d -> b h i d", attn, v)
 
         return out
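For reference, the reformatted assert in the first hunk is a runtime guard on the flash-attention path: F.scaled_dot_product_attention only exists in PyTorch 2.0 and later. A minimal standalone sketch of the same guard, where the flash flag here stands in for the Attend(..., flash=True) constructor argument:

import torch
from packaging import version

flash = True  # stand-in for the Attend(..., flash=True) constructor argument

# Flash attention is backed by F.scaled_dot_product_attention, added in PyTorch 2.0.
assert not (flash and version.parse(torch.__version__) < version.parse("2.0.0")), (
    "in order to use flash attention, you must be using pytorch 2.0 or above"
)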
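The two einsum hunks are the manual attention path that flash_attn bypasses. A minimal sketch checking that the two paths agree when dropout is off; the (batch, heads, seq, dim) tensor shapes and the default 1/sqrt(dim) scale are assumptions, since the forward docstring is elided above:

import torch
import torch.nn.functional as F
from torch import einsum  # torch.einsum ignores the spaces in the subscript patterns

b, h, n, d = 2, 4, 16, 32  # assumed: batch, heads, sequence length, head dimension
q, k, v = (torch.randn(b, h, n, d) for _ in range(3))
scale = d**-0.5

# Manual path, as in the forward() hunks of the diff.
sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale
attn = sim.softmax(dim=-1)
out_manual = einsum("b h i j, b h j d -> b h i d", attn, v)

# Fused path, as in flash_attn() (dropout_p is 0.0 outside training).
out_fused = F.scaled_dot_product_attention(q, k, v, dropout_p=0.0)

assert torch.allclose(out_manual, out_fused, atol=1e-5)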