torch.nn.functional

If no support information is listed for an API, that API's support status has yet to be verified.

| API Name | Supported | Limits and Notes |
| --- | --- | --- |
| torch.nn.functional.conv1d | fp32, fp16 | |
| torch.nn.functional.conv2d | fp16, fp32 | |
| torch.nn.functional.conv3d | | |
| torch.nn.functional.conv_transpose1d | fp32 | |
| torch.nn.functional.conv_transpose2d | fp32, fp16 | |
| torch.nn.functional.conv_transpose3d | | |
| torch.nn.functional.unfold | | |
| torch.nn.functional.fold | | |
| torch.nn.functional.avg_pool1d | | |
| torch.nn.functional.avg_pool2d | | |
| torch.nn.functional.avg_pool3d | | |
| torch.nn.functional.max_pool1d | | |
| torch.nn.functional.max_pool2d | | |
| torch.nn.functional.max_pool3d | | |
| torch.nn.functional.max_unpool1d | fp16, fp32, fp64, int8, uint8, int32, int64 | |
| torch.nn.functional.max_unpool2d | fp16, fp32, fp64, int8, uint8, int32, int64 | |
| torch.nn.functional.max_unpool3d | | |
| torch.nn.functional.lp_pool1d | | |
| torch.nn.functional.lp_pool2d | | |
| torch.nn.functional.adaptive_max_pool1d | | |
| torch.nn.functional.adaptive_max_pool2d | | |
| torch.nn.functional.adaptive_max_pool3d | | |
| torch.nn.functional.adaptive_avg_pool1d | fp32, fp16 | |
| torch.nn.functional.adaptive_avg_pool2d | fp32, fp16 | |
| torch.nn.functional.adaptive_avg_pool3d | | |
| torch.nn.functional.fractional_max_pool2d | | |
| torch.nn.functional.fractional_max_pool3d | | |
| torch.nn.functional.scaled_dot_product_attention | | |
| torch.nn.functional.threshold | fp16, fp32, int8, int16, uint8, int32, int64 | |
| torch.nn.functional.threshold_ | fp16, fp32, int8, int16, uint8, int32, int64 | |
| torch.nn.functional.relu | fp16, fp32, int8, uint8, int32, int64 | |
| torch.nn.functional.relu_ | fp16, fp32, int8, uint8, int32, int64 | |
| torch.nn.functional.hardtanh | fp32, fp16, int8, int16, int32, int64 | |
| torch.nn.functional.hardtanh_ | fp32, fp16, int8, int16, int32, int64 | |
| torch.nn.functional.hardswish | fp16, fp32 | |
| torch.nn.functional.relu6 | fp32, fp16, uint8, int8, int16, int32, int64 | |
| torch.nn.functional.elu | fp32, fp16 | |
| torch.nn.functional.elu_ | fp32, fp16 | |
| torch.nn.functional.selu | fp32, fp16 | |
| torch.nn.functional.celu | fp32, fp16 | |
| torch.nn.functional.leaky_relu | fp16, fp32, fp64 | |
| torch.nn.functional.leaky_relu_ | fp16, fp32, fp64 | |
| torch.nn.functional.prelu | fp32, fp16 | |
| torch.nn.functional.rrelu | bf16, fp32, fp16 | |
| torch.nn.functional.rrelu_ | | |
| torch.nn.functional.glu | fp32, fp16 | |
| torch.nn.functional.gelu | bf16, fp32, fp16 | |
| torch.nn.functional.logsigmoid | fp32, fp16 | |
| torch.nn.functional.hardshrink | fp32, fp16 | |
| torch.nn.functional.tanhshrink | | |
| torch.nn.functional.softsign | bf16, fp32, fp16, uint8, int8, int16, int32, int64 | |
| torch.nn.functional.softplus | fp16, fp32 | |
| torch.nn.functional.softmin | | |
| torch.nn.functional.softmax | bf16, fp32, fp16 | |
| torch.nn.functional.softshrink | fp32, fp16 | |
| torch.nn.functional.gumbel_softmax | | |
| torch.nn.functional.log_softmax | bf16, fp32, fp16 | |
| torch.nn.functional.tanh | fp16, fp32, int8, int16, uint8, int32, int64, bool | |
| torch.nn.functional.sigmoid | fp32, fp16, uint8, int8, int16, int32, int64, bool, complex64, complex128 | |
| torch.nn.functional.hardsigmoid | fp16, fp32 | |
| torch.nn.functional.silu | fp16, fp32, int64, bool | |
| torch.nn.functional.mish | | |
| torch.nn.functional.batch_norm | fp32, fp16 | |
| torch.nn.functional.group_norm | | |
| torch.nn.functional.instance_norm | | |
| torch.nn.functional.layer_norm | bf16, fp32, fp16 | |
| torch.nn.functional.local_response_norm | | |
| torch.nn.functional.normalize | | |
| torch.nn.functional.linear | fp32, fp16 | |
| torch.nn.functional.bilinear | fp32, fp16 | |
| torch.nn.functional.dropout | | |
| torch.nn.functional.alpha_dropout | bf16, fp32, fp16, uint8, int8, int16, int32, int64, bool, complex64, complex128 | |
| torch.nn.functional.feature_alpha_dropout | bf16, fp32, fp16, uint8, int8, int16, int32, int64, bool, complex64, complex128 | |
| torch.nn.functional.dropout1d | | |
| torch.nn.functional.dropout2d | | |
| torch.nn.functional.dropout3d | | |
| torch.nn.functional.embedding | int32, int64 | |
| torch.nn.functional.embedding_bag | | |
| torch.nn.functional.one_hot | int32, int64 | |
| torch.nn.functional.pairwise_distance | | |
| torch.nn.functional.cosine_similarity | fp32, fp16 | |
| torch.nn.functional.pdist | | |
| torch.nn.functional.binary_cross_entropy | fp32, fp16 | |
| torch.nn.functional.binary_cross_entropy_with_logits | fp32, fp16 | |
| torch.nn.functional.poisson_nll_loss | bf16, fp32, fp16, int64 | |
| torch.nn.functional.cosine_embedding_loss | | |
| torch.nn.functional.cross_entropy | bf16, fp32, fp16 | |
| torch.nn.functional.ctc_loss | fp32 | |
| torch.nn.functional.gaussian_nll_loss | bf16, fp32, fp16, uint8, int8, int16, int32, int64 | |
| torch.nn.functional.hinge_embedding_loss | | |
| torch.nn.functional.kl_div | fp16, fp32 | |
| torch.nn.functional.l1_loss | fp16, fp32, int64 | |
| torch.nn.functional.mse_loss | fp32, fp16 | |
| torch.nn.functional.margin_ranking_loss | bf16, fp32, fp16 | |
| torch.nn.functional.multilabel_margin_loss | | |
| torch.nn.functional.multilabel_soft_margin_loss | | |
| torch.nn.functional.multi_margin_loss | | Can fall back to the CPU implementation. |
| torch.nn.functional.nll_loss | fp32 | |
| torch.nn.functional.huber_loss | | |
| torch.nn.functional.smooth_l1_loss | fp32, fp16 | |
| torch.nn.functional.soft_margin_loss | | |
| torch.nn.functional.triplet_margin_loss | | |
| torch.nn.functional.triplet_margin_with_distance_loss | | |
| torch.nn.functional.pixel_shuffle | bf16, fp16, fp32, fp64, int8, uint8, int16, int32, int64, bool | |
| torch.nn.functional.pixel_unshuffle | bf16, fp16, fp32, fp64, int8, uint8, int16, int32, int64, bool | |
| torch.nn.functional.pad | bf16, fp32, fp16, uint8, int16, int32, int64, bool | Performance may degrade when the input x has six or more dimensions. |
| torch.nn.functional.interpolate | fp16, fp32, fp64 | Only mode = nearest is supported (see the example after this table). |
| torch.nn.functional.upsample | fp16, fp32, fp64 | Only mode = nearest is supported. |
| torch.nn.functional.upsample_nearest | fp16, fp32, fp64 | Only 3- to 5-dimensional inputs are supported. |
| torch.nn.functional.upsample_bilinear | fp32, fp16 | |
| torch.nn.functional.grid_sample | | |
| torch.nn.functional.affine_grid | fp16, fp32 | |
| torch.nn.parallel.data_parallel | | |
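
The constraints above carry over directly into how these functional APIs are called. Below is a minimal sketch in plain PyTorch, not tied to any particular backend: the device string and the fp32 input dtype are placeholder choices for illustration, and the call stays within the constraint listed for torch.nn.functional.interpolate (mode = nearest only, with fp16/fp32/fp64 inputs).

```python
import torch
import torch.nn.functional as F

# Minimal sketch only. The device string and dtype are placeholders;
# swap in your backend's device string and one of the dtypes listed
# for interpolate in the table (fp16/fp32/fp64).
device = "cpu"

x = torch.randn(1, 3, 32, 32, dtype=torch.float32, device=device)

# The table lists only mode = "nearest" as supported for interpolate,
# so this call keeps within that constraint.
y = F.interpolate(x, scale_factor=2, mode="nearest")
print(y.shape)  # torch.Size([1, 3, 64, 64])
```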