[Triton Tutorial] Fused Attention


Triton is a language and compiler for parallel programming. It aims to provide a Python-based programming environment for productively writing custom DNN compute kernels that can run at maximal throughput on modern GPU hardware.

More Triton documentation in Chinese is available at https://triton.hyper.ai/

This is a Triton implementation of Tri Dao's Flash Attention v2 algorithm. Credits: OpenAI kernel team.

Extra credits:

  • Original Flash Attention paper (https://arxiv.org/abs/2205.14135)

  • Rabe and Staats (https://arxiv.org/pdf/2112.05682v2.pdf)

Out:

fused-attention-batch4-head32-d64-fwd-causal=True: [benchmark plot]
fused-attention-batch4-head32-d64-fwd-causal=False: [benchmark plot]
fused-attention-batch4-head32-d64-bwd-causal=True: [benchmark plot]

import pytest
import torch


import triton
import triton.language as tl




def is_hip():
    return triton.runtime.driver.active.get_current_target().backend == "hip"




@triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q,  #
                    K_block_ptr, V_block_ptr,  #
                    start_m, qk_scale,  #
                    BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr,  #
                    STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr,  #
                    N_CTX: tl.constexpr, fp8_v: tl.constexpr):
    # range of values handled by this stage
    if STAGE == 1:
        lo, hi = 0, start_m * BLOCK_M
    elif STAGE == 2:
        lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
        lo = tl.multiple_of(lo, BLOCK_M)
    # causal = False
    else:
        lo, hi = 0, N_CTX
    K_block_ptr = tl.advance(K_block_ptr, (0, lo))
    V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
    # loop over k, v and update accumulator
    for start_n in range(lo, hi, BLOCK_N):
        start_n = tl.multiple_of(start_n, BLOCK_N)
        # -- compute qk ----
        k = tl.load(K_block_ptr)
        qk = tl.dot(q, k)
        if STAGE == 2:
            mask = offs_m[:, None] >= (start_n + offs_n[None, :])
            qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
            m_ij = tl.maximum(m_i, tl.max(qk, 1))
            qk -= m_ij[:, None]
        else:
            m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
            qk = qk * qk_scale - m_ij[:, None]
        p = tl.math.exp2(qk)
        l_ij = tl.sum(p, 1)
        # -- update m_i and l_i
        alpha = tl.math.exp2(m_i - m_ij)
        l_i = l_i * alpha + l_ij
        # -- update output accumulator --
        acc = acc * alpha[:, None]
        # update acc
        v = tl.load(V_block_ptr)
        if fp8_v:
            p = p.to(tl.float8e5)
        else:
            p = p.to(tl.float16)
        acc = tl.dot(p, v, acc)
        # update m_i and l_i
        m_i = m_ij
        V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
        K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
    return acc, l_i, m_i
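

# The loop above is Flash Attention's streaming ("online") softmax: each K/V
# block updates a running row-max m_i, a running normalizer l_i and an
# unnormalized accumulator acc, so the full N_CTX x N_CTX score matrix is never
# materialized. The kernel works in base 2 because exp2 is cheaper than exp on
# GPUs (qk_scale folds in the 1/ln(2) conversion). Below is a minimal PyTorch
# sketch of the same recurrence, added here for illustration only (single head,
# unit scale, non-causal, plain exp instead of exp2):
def _online_softmax_reference(q, k, v, block_n=64):
    m_i = torch.full((q.shape[0],), float("-inf"))
    l_i = torch.zeros(q.shape[0])
    acc = torch.zeros(q.shape[0], v.shape[1])
    for start in range(0, k.shape[0], block_n):
        kb, vb = k[start:start + block_n], v[start:start + block_n]
        qk = q @ kb.T
        m_new = torch.maximum(m_i, qk.max(dim=1).values)
        alpha = torch.exp(m_i - m_new)       # rescale old l_i / acc for the new max
        p = torch.exp(qk - m_new[:, None])
        l_i = l_i * alpha + p.sum(dim=1)
        acc = acc * alpha[:, None] + p @ vb
        m_i = m_new
    return acc / l_i[:, None]                # normalization happens once, at the end
# For fp32 inputs this matches torch.softmax(q @ k.T, -1) @ v up to float error.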




# We don't run auto-tuning every time to keep the tutorial fast. Keeping
# the code below and commenting out the equivalent parameters is convenient for
# re-tuning.
configs = [
    triton.Config({'BLOCK_M': BM, 'BLOCK_N': BN}, num_stages=s, num_warps=w) \
    for BM in [64, 128]\
    for BN in [32, 64]\
    for s in ([1] if is_hip() else [3, 4, 7])\
    for w in [4, 8]\
]




def keep(conf):
    BLOCK_M = conf.kwargs["BLOCK_M"]
    BLOCK_N = conf.kwargs["BLOCK_N"]
    if BLOCK_M * BLOCK_N < 128 * 128 and conf.num_warps == 8:
        return False
    return True
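

# A quick count for intuition (ours; assumes a CUDA, i.e. non-HIP, target): the
# comprehension above generates 2 (BLOCK_M) * 2 (BLOCK_N) * 3 (num_stages) * 2
# (num_warps) = 24 candidates. Every tile here has BLOCK_M * BLOCK_N <= 128 * 64,
# which is < 128 * 128, so keep() rejects every num_warps == 8 candidate and the
# autotuner searches the remaining 12 configs, e.g.:
#     len(list(filter(keep, configs)))  # -> 12 on a CUDA machine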




@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
@triton.jit
def _attn_fwd(Q, K, V, sm_scale, M, Out,  #
              stride_qz, stride_qh, stride_qm, stride_qk,  #
              stride_kz, stride_kh, stride_kn, stride_kk,  #
              stride_vz, stride_vh, stride_vk, stride_vn,  #
              stride_oz, stride_oh, stride_om, stride_on,  #
              Z, H, N_CTX,  #
              HEAD_DIM: tl.constexpr,  #
              BLOCK_M: tl.constexpr,  #
              BLOCK_N: tl.constexpr,  #
              STAGE: tl.constexpr  #
              ):
    tl.static_assert(BLOCK_N <= HEAD_DIM)
    start_m = tl.program_id(0)
    off_hz = tl.program_id(1)
    off_z = off_hz // H
    off_h = off_hz % H
    qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh


    # block pointers
    Q_block_ptr = tl.make_block_ptr(
        base=Q + qvk_offset,
        shape=(N_CTX, HEAD_DIM),
        strides=(stride_qm, stride_qk),
        offsets=(start_m * BLOCK_M, 0),
        block_shape=(BLOCK_M, HEAD_DIM),
        order=(1, 0),
    )
    v_order: tl.constexpr = (0, 1) if V.dtype.element_ty == tl.float8e5 else (1, 0)
    V_block_ptr = tl.make_block_ptr(
        base=V + qvk_offset,
        shape=(N_CTX, HEAD_DIM),
        strides=(stride_vk, stride_vn),
        offsets=(0, 0),
        block_shape=(BLOCK_N, HEAD_DIM),
        order=v_order,
    )
    K_block_ptr = tl.make_block_ptr(
        base=K + qvk_offset,
        shape=(HEAD_DIM, N_CTX),
        strides=(stride_kk, stride_kn),
        offsets=(0, 0),
        block_shape=(HEAD_DIM, BLOCK_N),
        order=(0, 1),
    )
    O_block_ptr = tl.make_block_ptr(
        base=Out + qvk_offset,
        shape=(N_CTX, HEAD_DIM),
        strides=(stride_om, stride_on),
        offsets=(start_m * BLOCK_M, 0),
        block_shape=(BLOCK_M, HEAD_DIM),
        order=(1, 0),
    )
    # initialize offsets
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)
    # initialize pointer to m and l
    m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
    l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
    acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
    # load scales
    qk_scale = sm_scale
    qk_scale *= 1.44269504  # = 1/ln(2), i.e. log2(e)
    # load q: it will stay in SRAM throughout
    q = tl.load(Q_block_ptr)
    # stage 1: off-band
    # For causal = True, STAGE = 3 and _attn_fwd_inner gets 1 as its STAGE
    # For causal = False, STAGE = 1, and _attn_fwd_inner gets 3 as its STAGE
    if STAGE & 1:
        acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr,  #
                                        start_m, qk_scale,  #
                                        BLOCK_M, HEAD_DIM, BLOCK_N,  #
                                        4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5  #
                                        )
    # stage 2: on-band
    if STAGE & 2:
        # barrier makes it easier for compiler to schedule the
        # two loops independently
        acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr,  #
                                        start_m, qk_scale,  #
                                        BLOCK_M, HEAD_DIM, BLOCK_N,  #
                                        2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5  #
                                        )
    # epilogue
    m_i += tl.math.log2(l_i)
    acc = acc / l_i[:, None]
    m_ptrs = M + off_hz * N_CTX + offs_m
    tl.store(m_ptrs, m_i)
    tl.store(O_block_ptr, acc.to(Out.type.element_ty))




@triton.jit
def _attn_bwd_preprocess(O, DO,  #
                         Delta,  #
                         Z, H, N_CTX,  #
                         BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr  #
                         ):
    off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
    off_hz = tl.program_id(1)
    off_n = tl.arange(0, HEAD_DIM)
    # load
    o = tl.load(O + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :])
    do = tl.load(DO + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :]).to(tl.float32)
    delta = tl.sum(o * do, axis=1)
    # write-back
    tl.store(Delta + off_hz * N_CTX + off_m, delta)
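

# Why this preprocessing step (explanation ours): for P = softmax(S), the softmax
# backward is dS = P * (dP - Delta[:, None]) with Delta_i = sum_j P_ij * dP_ij.
# Since O = P @ V and dP = dO @ V^T, that row sum collapses to
# Delta_i = sum_d O_id * dO_id = rowsum(O * dO), which is exactly what this
# kernel computes. Precomputing Delta once means the dK/dV and dQ kernels below
# can simply load it instead of re-deriving it per tile. PyTorch equivalent for
# one head (illustrative): delta = (o * do).sum(dim=-1)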




# The main inner-loop logic for computing dK and dV.
@triton.jit
def _attn_bwd_dkdv(dk, dv,  #
                   Q, k, v, sm_scale,  #
                   DO,  #
                   M, D,  #
                   # shared by Q/K/V/DO.
                   stride_tok, stride_d,  #
                   H, N_CTX, BLOCK_M1: tl.constexpr,  #
                   BLOCK_N1: tl.constexpr,  #
                   HEAD_DIM: tl.constexpr,  #
                   # Filled in by the wrapper.
                   start_n, start_m, num_steps,  #
                   MASK: tl.constexpr):
    offs_m = start_m + tl.arange(0, BLOCK_M1)
    offs_n = start_n + tl.arange(0, BLOCK_N1)
    offs_k = tl.arange(0, HEAD_DIM)
    qT_ptrs = Q + offs_m[None, :] * stride_tok + offs_k[:, None] * stride_d
    do_ptrs = DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
    # BLOCK_N1 must be a multiple of BLOCK_M1, otherwise the code wouldn't work.
    tl.static_assert(BLOCK_N1 % BLOCK_M1 == 0)
    curr_m = start_m
    step_m = BLOCK_M1
    for blk_idx in range(num_steps):
        qT = tl.load(qT_ptrs)
        # Load m before computing qk to reduce pipeline stall.
        offs_m = curr_m + tl.arange(0, BLOCK_M1)
        m = tl.load(M + offs_m)
        qkT = tl.dot(k, qT)
        pT = tl.math.exp2(qkT - m[None, :])
        # Autoregressive masking.
        if MASK:
            mask = (offs_m[None, :] >= offs_n[:, None])
            pT = tl.where(mask, pT, 0.0)
        do = tl.load(do_ptrs)
        # Compute dV.
        ppT = pT
        ppT = ppT.to(tl.float16)
        dv += tl.dot(ppT, do)
        # D (= delta) is pre-divided by ds_scale.
        Di = tl.load(D + offs_m)
        # Compute dP and dS.
        dpT = tl.dot(v, tl.trans(do)).to(tl.float32)
        dsT = pT * (dpT - Di[None, :])
        dsT = dsT.to(tl.float16)
        dk += tl.dot(dsT, tl.trans(qT))
        # Increment pointers.
        curr_m += step_m
        qT_ptrs += step_m * stride_tok
        do_ptrs += step_m * stride_tok
    return dk, dv




# the main inner-loop logic for computing dQ
@triton.jit
def _attn_bwd_dq(dq, q, K, V,  #
                 do, m, D,
                 # shared by Q/K/V/DO.
                 stride_tok, stride_d,  #
                 H, N_CTX,  #
                 BLOCK_M2: tl.constexpr,  #
                 BLOCK_N2: tl.constexpr,  #
                 HEAD_DIM: tl.constexpr,
                 # Filled in by the wrapper.
                 start_m, start_n, num_steps,  #
                 MASK: tl.constexpr):
    offs_m = start_m + tl.arange(0, BLOCK_M2)
    offs_n = start_n + tl.arange(0, BLOCK_N2)
    offs_k = tl.arange(0, HEAD_DIM)
    kT_ptrs = K + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
    vT_ptrs = V + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
    # D (= delta) is pre-divided by ds_scale.
    Di = tl.load(D + offs_m)
    # BLOCK_M2 must be a multiple of BLOCK_N2, otherwise the code wouldn't work.
    tl.static_assert(BLOCK_M2 % BLOCK_N2 == 0)
    curr_n = start_n
    step_n = BLOCK_N2
    for blk_idx in range(num_steps):
        kT = tl.load(kT_ptrs)
        vT = tl.load(vT_ptrs)
        qk = tl.dot(q, kT)
        p = tl.math.exp2(qk - m)
        # Autoregressive masking.
        if MASK:
            offs_n = curr_n + tl.arange(0, BLOCK_N2)
            mask = (offs_m[:, None] >= offs_n[None, :])
            p = tl.where(mask, p, 0.0)
        # Compute dP and dS.
        dp = tl.dot(do, vT).to(tl.float32)
        ds = p * (dp - Di[:, None])
        ds = ds.to(tl.float16)
        # Compute dQ.
        # NOTE: We need to de-scale dq in the end, because kT was pre-scaled.
        dq += tl.dot(ds, tl.trans(kT))
        # Increment pointers.
        curr_n += step_n
        kT_ptrs += step_n * stride_tok
        vT_ptrs += step_n * stride_tok
    return dq




@triton.jit
def _attn_bwd(Q, K, V, sm_scale,  #
              DO,  #
              DQ, DK, DV,  #
              M, D,
              # shared by Q/K/V/DO.
              stride_z, stride_h, stride_tok, stride_d,  #
              H, N_CTX,  #
              BLOCK_M1: tl.constexpr,  #
              BLOCK_N1: tl.constexpr,  #
              BLOCK_M2: tl.constexpr,  #
              BLOCK_N2: tl.constexpr,  #
              BLK_SLICE_FACTOR: tl.constexpr,  #
              HEAD_DIM: tl.constexpr):
    LN2: tl.constexpr = 0.6931471824645996  # = ln(2)


    bhid = tl.program_id(2)
    off_chz = (bhid * N_CTX).to(tl.int64)
    adj = (stride_h * (bhid % H) + stride_z * (bhid // H)).to(tl.int64)
    pid = tl.program_id(0)


    # offset pointers for batch/head
    Q += adj
    K += adj
    V += adj
    DO += adj
    DQ += adj
    DK += adj
    DV += adj
    M += off_chz
    D += off_chz


    # load scales
    offs_k = tl.arange(0, HEAD_DIM)


    start_n = pid * BLOCK_N1
    start_m = start_n


    MASK_BLOCK_M1: tl.constexpr = BLOCK_M1 // BLK_SLICE_FACTOR
    offs_n = start_n + tl.arange(0, BLOCK_N1)


    dv = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)
    dk = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)


    # load K and V: they stay in SRAM throughout the inner loop.
    k = tl.load(K + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)
    v = tl.load(V + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)


    num_steps = BLOCK_N1 // MASK_BLOCK_M1


    dk, dv = _attn_bwd_dkdv(dk, dv,  #
                            Q, k, v, sm_scale,  #
                            DO,  #
                            M, D,  #
                            stride_tok, stride_d,  #
                            H, N_CTX,  #
                            MASK_BLOCK_M1, BLOCK_N1, HEAD_DIM,  #
                            start_n, start_m, num_steps,  #
                            MASK=True  #
                            )


    start_m += num_steps * MASK_BLOCK_M1
    num_steps = (N_CTX - start_m) // BLOCK_M1


    # Compute dK and dV for non-masked blocks.
    dk, dv = _attn_bwd_dkdv(  #
        dk, dv,  #
        Q, k, v, sm_scale,  #
        DO,  #
        M, D,  #
        stride_tok, stride_d,  #
        H, N_CTX,  #
        BLOCK_M1, BLOCK_N1, HEAD_DIM,  #
        start_n, start_m, num_steps,  #
        MASK=False  #
    )


    dv_ptrs = DV + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
    tl.store(dv_ptrs, dv)


    # Write back dK.
    dk *= sm_scale
    dk_ptrs = DK + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
    tl.store(dk_ptrs, dk)


    # THIS BLOCK DOES DQ:
    start_m = pid * BLOCK_M2
    end_n = start_m + BLOCK_M2


    MASK_BLOCK_N2: tl.constexpr = BLOCK_N2 // BLK_SLICE_FACTOR
    offs_m = start_m + tl.arange(0, BLOCK_M2)


    q = tl.load(Q + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d)
    dq = tl.zeros([BLOCK_M2, HEAD_DIM], dtype=tl.float32)
    do = tl.load(DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d)


    m = tl.load(M + offs_m)
    m = m[:, None]


    # Compute dQ for masked (diagonal) blocks.
    # NOTE: This code scans each row of QK^T backward (from right to left,
    # but inside each call to _attn_bwd_dq, from left to right), but that's
    # not due to anything important.  I just wanted to reuse the loop
    # structure for dK & dV above as much as possible.
    num_steps = BLOCK_M2 // MASK_BLOCK_N2
    dq = _attn_bwd_dq(dq, q, K, V,  #
                      do, m, D,  #
                      stride_tok, stride_d,  #
                      H, N_CTX,  #
                      BLOCK_M2, MASK_BLOCK_N2, HEAD_DIM,  #
                      start_m, end_n - num_steps * MASK_BLOCK_N2, num_steps,  #
                      MASK=True  #
                      )
    end_n -= num_steps * MASK_BLOCK_N2
    # stage 2
    num_steps = end_n // BLOCK_N2
    dq = _attn_bwd_dq(dq, q, K, V,  #
                      do, m, D,  #
                      stride_tok, stride_d,  #
                      H, N_CTX,  #
                      BLOCK_M2, BLOCK_N2, HEAD_DIM,  #
                      start_m, end_n - num_steps * BLOCK_N2, num_steps,  #
                      MASK=False  #
                      )
    # Write back dQ.
    dq_ptrs = DQ + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
    dq *= LN2
    tl.store(dq_ptrs, dq)




class _attention(torch.autograd.Function):


    @staticmethod
    def forward(ctx, q, k, v, causal, sm_scale):
        # shape constraints
        HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
        # when v is in float8_e5m2 it is transposed.
        HEAD_DIM_V = v.shape[-1]
        assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
        assert HEAD_DIM_K in {16, 32, 64, 128, 256}
        o = torch.empty_like(q)
        stage = 3 if causal else 1
        extra_kern_args = {}
        # Tuning for AMD target
        if is_hip():
            waves_per_eu = 3 if HEAD_DIM_K <= 64 else 2
            extra_kern_args = {"waves_per_eu": waves_per_eu, "allow_flush_denorm": True}


        grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
        M = torch.empty((q.shape[0], q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
        _attn_fwd[grid](
            q, k, v, sm_scale, M, o,  #
            q.stride(0), q.stride(1), q.stride(2), q.stride(3),  #
            k.stride(0), k.stride(1), k.stride(2), k.stride(3),  #
            v.stride(0), v.stride(1), v.stride(2), v.stride(3),  #
            o.stride(0), o.stride(1), o.stride(2), o.stride(3),  #
            q.shape[0], q.shape[1],  #
            N_CTX=q.shape[2],  #
            HEAD_DIM=HEAD_DIM_K,  #
            STAGE=stage,  #
            **extra_kern_args)


        ctx.save_for_backward(q, k, v, o, M)
        ctx.grid = grid
        ctx.sm_scale = sm_scale
        ctx.HEAD_DIM = HEAD_DIM_K
        ctx.causal = causal
        return o


    @staticmethod
    def backward(ctx, do):
        q, k, v, o, M = ctx.saved_tensors
        assert do.is_contiguous()
        assert q.stride() == k.stride() == v.stride() == o.stride() == do.stride()
        dq = torch.empty_like(q)
        dk = torch.empty_like(k)
        dv = torch.empty_like(v)
        BATCH, N_HEAD, N_CTX = q.shape[:3]
        PRE_BLOCK = 128
        NUM_WARPS, NUM_STAGES = 4, 5
        BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2 = 32, 128, 128, 32
        BLK_SLICE_FACTOR = 2
        RCP_LN2 = 1.4426950408889634  # = 1.0 / ln(2)
        arg_k = k
        arg_k = arg_k * (ctx.sm_scale * RCP_LN2)
        PRE_BLOCK = 128
        assert N_CTX % PRE_BLOCK == 0
        pre_grid = (N_CTX // PRE_BLOCK, BATCH * N_HEAD)
        delta = torch.empty_like(M)
        _attn_bwd_preprocess[pre_grid](
            o, do,  #
            delta,  #
            BATCH, N_HEAD, N_CTX,  #
            BLOCK_M=PRE_BLOCK, HEAD_DIM=ctx.HEAD_DIM  #
        )
        grid = (N_CTX // BLOCK_N1, 1, BATCH * N_HEAD)
        _attn_bwd[grid](
            q, arg_k, v, ctx.sm_scale, do, dq, dk, dv,  #
            M, delta,  #
            q.stride(0), q.stride(1), q.stride(2), q.stride(3),  #
            N_HEAD, N_CTX,  #
            BLOCK_M1=BLOCK_M1, BLOCK_N1=BLOCK_N1,  #
            BLOCK_M2=BLOCK_M2, BLOCK_N2=BLOCK_N2,  #
            BLK_SLICE_FACTOR=BLK_SLICE_FACTOR,  #
            HEAD_DIM=ctx.HEAD_DIM,  #
            num_warps=NUM_WARPS,  #
            num_stages=NUM_STAGES  #
        )


        return dq, dk, dv, None, None




attention = _attention.apply
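

# Example usage (a sketch, not part of the original tutorial; needs a CUDA
# device, contiguous fp16 tensors, and N_CTX a multiple of 128 for backward):
#     q = torch.randn(2, 4, 1024, 64, dtype=torch.float16, device="cuda",
#                     requires_grad=True)
#     k = torch.randn_like(q, requires_grad=True)
#     v = torch.randn_like(q, requires_grad=True)
#     out = attention(q, k, v, True, 1.0 / 64 ** 0.5)  # causal=True, scale=1/sqrt(d)
#     out.backward(torch.randn_like(out))              # runs the Triton backward kernels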




@pytest.mark.parametrize("Z, H, N_CTX, HEAD_DIM", [(1, 2, 1024, 64)])
@pytest.mark.parametrize("causal", [True])
def test_op(Z, H, N_CTX, HEAD_DIM, causal, dtype=torch.float16):
    torch.manual_seed(20)
    q = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
    k = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
    v = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
    sm_scale = 0.5
    dout = torch.randn_like(q)
    # reference implementation
    M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
    p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
    if causal:
        p[:, :, M == 0] = float("-inf")
    p = torch.softmax(p.float(), dim=-1).half()
    # p = torch.exp(p)
    ref_out = torch.matmul(p, v)
    ref_out.backward(dout)
    ref_dv, v.grad = v.grad.clone(), None
    ref_dk, k.grad = k.grad.clone(), None
    ref_dq, q.grad = q.grad.clone(), None
    # triton implementation
    tri_out = attention(q, k, v, causal, sm_scale).half()
    tri_out.backward(dout)
    tri_dv, v.grad = v.grad.clone(), None
    tri_dk, k.grad = k.grad.clone(), None
    tri_dq, q.grad = q.grad.clone(), None
    # compare
    assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0)
    rtol = 0.0
    # Relative tolerance workaround for known hardware limitation of MI200 GPU.
    # For details see https://pytorch.org/docs/stable/notes/numerical_accuracy.html#reduced-precision-fp16-and-bf16-gemms-and-convolutions-on-amd-instinct-mi200-devices
    if torch.version.hip is not None and triton.runtime.driver.active.get_current_target().arch == "gfx90a":
        rtol = 1e-2
    assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=rtol)
    assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=rtol)
    assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=rtol)




try:
    from flash_attn.flash_attn_interface import \
        flash_attn_qkvpacked_func as flash_attn_func
    HAS_FLASH = True
except BaseException:
    HAS_FLASH = False


TORCH_HAS_FP8 = hasattr(torch, 'float8_e5m2')
BATCH, N_HEADS, HEAD_DIM = 4, 32, 64
# vary seq length for fixed head and batch=4
configs = []
for mode in ["fwd", "bwd"]:
    for causal in [True, False]:
        if mode == "bwd" and not causal:
            continue
        configs.append(
            triton.testing.Benchmark(
                x_names=["N_CTX"],
                x_vals=[2**i for i in range(10, 15)],
                line_arg="provider",
                line_vals=["triton-fp16"] + (["triton-fp8"] if TORCH_HAS_FP8 else []) +
                (["flash"] if HAS_FLASH else []),
                line_names=["Triton [FP16]"] + (["Triton [FP8]"] if TORCH_HAS_FP8 else []) +
                (["Flash-2"] if HAS_FLASH else []),
                styles=[("red", "-"), ("blue", "-")],
                ylabel="ms",
                plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}-{mode}-causal={causal}",
                args={
                    "H": N_HEADS,
                    "BATCH": BATCH,
                    "HEAD_DIM": HEAD_DIM,
                    "mode": mode,
                    "causal": causal,
                },
            ))




@triton.testing.perf_report(configs)
def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, causal, mode, provider, device="cuda"):
    assert mode in ["fwd", "bwd"]
    warmup = 25
    rep = 100
    dtype = torch.float16
    if "triton" in provider:
        q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
        k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
        v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
        if mode == "fwd" and "fp8" in provider:
            q = q.to(torch.float8_e5m2)
            k = k.to(torch.float8_e5m2)
            v = v.permute(0, 1, 3, 2).contiguous()
            v = v.permute(0, 1, 3, 2)
            v = v.to(torch.float8_e5m2)
        sm_scale = 1.3
        fn = lambda: attention(q, k, v, causal, sm_scale)
        if mode == "bwd":
            o = fn()
            do = torch.randn_like(o)
            fn = lambda: o.backward(do, retain_graph=True)
        ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
    if provider == "flash":
        qkv = torch.randn((BATCH, N_CTX, 3, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
        fn = lambda: flash_attn_func(qkv, causal=causal)
        if mode == "bwd":
            o = fn()
            do = torch.randn_like(o)
            fn = lambda: o.backward(do, retain_graph=True)
        ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
    flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
    total_flops = 2 * flops_per_matmul
    if causal:
        total_flops *= 0.5
    if mode == "bwd":
        total_flops *= 2.5  # 2.0(bwd) + 0.5(recompute)
    return total_flops / ms * 1e-9
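

# Units check (note ours): do_bench returns a time in ms, so
# total_flops / ms * 1e-9 == total_flops * 1e-12 / (ms * 1e-3), i.e. TFLOP/s.
# E.g. for BATCH=4, H=32, HEAD_DIM=64, N_CTX=4096, mode="fwd", causal=True:
#     flops_per_matmul = 2 * 4 * 32 * 4096 * 4096 * 64  # ~2.75e11
#     total_flops = 2 * flops_per_matmul * 0.5           # ~2.75e11
# so a hypothetical 1 ms run would be reported as ~275 (TFLOP/s).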




if __name__ == "__main__":
    # only works on post-Ampere GPUs right now
    bench_flash_attention.run(save_path=".", print_data=True)

Download Jupyter notebook: 06-fused-attention.ipynb

Download Python source code: 06-fused-attention.py

Download zipped: 06-fused-attention.zip

近年来&#xff0c;中国信创产业快速崛起&#xff0c;其中数据库作为基础软件的重要组成部分&#xff0c;发挥了至关重要的作用。近日&#xff0c;由DBC联合CIW/CIS共同发布的“2024信创数据库TOP30”榜单正式揭晓&#xff0c;汇聚了国内顶尖的数据库企业及产品&#xff0c;成为…