If an API's support status is not marked, its support status has yet to be verified. A minimal usage sketch of the interfaces marked as supported follows the table.
API Name | Supported | Limitations and Notes |
---|---|---|
torch.optim.Optimizer | | |
Optimizer.add_param_group | Yes | |
Optimizer.load_state_dict | Yes | |
Optimizer.state_dict | Yes | |
Optimizer.step | Yes | |
Optimizer.zero_grad | Yes | |
torch.optim.Adadelta | No | |
torch.optim.Adadelta.add_param_group | Yes | |
torch.optim.Adadelta.load_state_dict | Yes | |
torch.optim.Adadelta.register_step_post_hook | No | |
torch.optim.Adadelta.register_step_pre_hook | No | |
torch.optim.Adadelta.state_dict | Yes | |
torch.optim.Adadelta.zero_grad | Yes | |
torch.optim.Adagrad | No | |
torch.optim.Adagrad.add_param_group | Yes | |
torch.optim.Adagrad.load_state_dict | Yes | |
torch.optim.Adagrad.register_step_post_hook | No | |
torch.optim.Adagrad.register_step_pre_hook | No | |
torch.optim.Adagrad.state_dict | Yes | |
torch.optim.Adagrad.zero_grad | Yes | |
torch.optim.Adam | No | |
torch.optim.Adam.add_param_group | Yes | |
torch.optim.Adam.load_state_dict | Yes | |
torch.optim.Adam.register_step_post_hook | No | |
torch.optim.Adam.register_step_pre_hook | No | |
torch.optim.Adam.state_dict | Yes | |
torch.optim.Adam.zero_grad | Yes | |
torch.optim.AdamW | No | |
torch.optim.AdamW.add_param_group | | |
torch.optim.AdamW.load_state_dict | | |
torch.optim.AdamW.register_step_post_hook | No | |
torch.optim.AdamW.register_step_pre_hook | No | |
torch.optim.AdamW.state_dict | | |
torch.optim.AdamW.zero_grad | | |
torch.optim.SparseAdam | No | |
torch.optim.SparseAdam.add_param_group | | |
torch.optim.SparseAdam.load_state_dict | | |
torch.optim.SparseAdam.register_step_post_hook | No | |
torch.optim.SparseAdam.register_step_pre_hook | No | |
torch.optim.SparseAdam.state_dict | | |
torch.optim.SparseAdam.step | | |
torch.optim.SparseAdam.zero_grad | | |
torch.optim.Adamax | No | |
torch.optim.Adamax.add_param_group | | |
torch.optim.Adamax.load_state_dict | | |
torch.optim.Adamax.register_step_post_hook | No | |
torch.optim.Adamax.register_step_pre_hook | No | |
torch.optim.Adamax.state_dict | | |
torch.optim.Adamax.zero_grad | | |
torch.optim.ASGD | No | |
torch.optim.ASGD.add_param_group | | |
torch.optim.ASGD.load_state_dict | | |
torch.optim.ASGD.register_step_post_hook | No | |
torch.optim.ASGD.register_step_pre_hook | No | |
torch.optim.ASGD.state_dict | | |
torch.optim.ASGD.zero_grad | | |
torch.optim.LBFGS | | |
torch.optim.LBFGS.add_param_group | | |
torch.optim.LBFGS.load_state_dict | | |
torch.optim.LBFGS.register_step_post_hook | No | |
torch.optim.LBFGS.register_step_pre_hook | No | |
torch.optim.LBFGS.state_dict | | |
torch.optim.LBFGS.step | | |
torch.optim.LBFGS.zero_grad | | |
torch.optim.NAdam | No | |
torch.optim.NAdam.add_param_group | | |
torch.optim.NAdam.load_state_dict | | |
torch.optim.NAdam.register_step_post_hook | No | |
torch.optim.NAdam.register_step_pre_hook | No | |
torch.optim.NAdam.state_dict | | |
torch.optim.NAdam.zero_grad | | |
torch.optim.RAdam | No | |
torch.optim.RAdam.add_param_group | | |
torch.optim.RAdam.load_state_dict | | |
torch.optim.RAdam.register_step_post_hook | No | |
torch.optim.RAdam.register_step_pre_hook | No | |
torch.optim.RAdam.state_dict | | |
torch.optim.RAdam.zero_grad | | |
torch.optim.RMSprop | No | |
torch.optim.RMSprop.add_param_group | | |
torch.optim.RMSprop.load_state_dict | | |
torch.optim.RMSprop.register_step_post_hook | No | |
torch.optim.RMSprop.register_step_pre_hook | No | |
torch.optim.RMSprop.state_dict | | |
torch.optim.RMSprop.zero_grad | | |
torch.optim.Rprop | No | |
torch.optim.Rprop.add_param_group | | |
torch.optim.Rprop.load_state_dict | | |
torch.optim.Rprop.register_step_post_hook | No | |
torch.optim.Rprop.register_step_pre_hook | No | |
torch.optim.Rprop.state_dict | | |
torch.optim.Rprop.zero_grad | | |
torch.optim.SGD | No | |
torch.optim.SGD.add_param_group | | |
torch.optim.SGD.load_state_dict | | |
torch.optim.SGD.register_step_post_hook | No | |
torch.optim.SGD.register_step_pre_hook | No | |
torch.optim.SGD.state_dict | | |
torch.optim.SGD.zero_grad | | |
torch.optim.lr_scheduler.LambdaLR | Yes | |
torch.optim.lr_scheduler.LambdaLR.get_last_lr | | |
torch.optim.lr_scheduler.LambdaLR.load_state_dict | | |
torch.optim.lr_scheduler.LambdaLR.print_lr | | |
torch.optim.lr_scheduler.LambdaLR.state_dict | | |
torch.optim.lr_scheduler.MultiplicativeLR | | |
torch.optim.lr_scheduler.MultiplicativeLR.get_last_lr | | |
torch.optim.lr_scheduler.MultiplicativeLR.load_state_dict | | |
torch.optim.lr_scheduler.MultiplicativeLR.print_lr | | |
torch.optim.lr_scheduler.MultiplicativeLR.state_dict | | |
torch.optim.lr_scheduler.StepLR | | |
torch.optim.lr_scheduler.StepLR.get_last_lr | | |
torch.optim.lr_scheduler.StepLR.load_state_dict | | |
torch.optim.lr_scheduler.StepLR.print_lr | | |
torch.optim.lr_scheduler.StepLR.state_dict | | |
torch.optim.lr_scheduler.MultiStepLR | | |
torch.optim.lr_scheduler.MultiStepLR.get_last_lr | | |
torch.optim.lr_scheduler.MultiStepLR.load_state_dict | | |
torch.optim.lr_scheduler.MultiStepLR.print_lr | | |
torch.optim.lr_scheduler.MultiStepLR.state_dict | | |
torch.optim.lr_scheduler.ConstantLR | | |
torch.optim.lr_scheduler.ConstantLR.get_last_lr | | |
torch.optim.lr_scheduler.ConstantLR.load_state_dict | | |
torch.optim.lr_scheduler.ConstantLR.print_lr | | |
torch.optim.lr_scheduler.ConstantLR.state_dict | | |
torch.optim.lr_scheduler.LinearLR | | |
torch.optim.lr_scheduler.LinearLR.get_last_lr | | |
torch.optim.lr_scheduler.LinearLR.load_state_dict | | |
torch.optim.lr_scheduler.LinearLR.print_lr | | |
torch.optim.lr_scheduler.LinearLR.state_dict | | |
torch.optim.lr_scheduler.ExponentialLR | | |
torch.optim.lr_scheduler.ExponentialLR.get_last_lr | | |
torch.optim.lr_scheduler.ExponentialLR.load_state_dict | | |
torch.optim.lr_scheduler.ExponentialLR.print_lr | | |
torch.optim.lr_scheduler.ExponentialLR.state_dict | | |
torch.optim.lr_scheduler.PolynomialLR | No | |
torch.optim.lr_scheduler.PolynomialLR.get_last_lr | No | |
torch.optim.lr_scheduler.PolynomialLR.load_state_dict | No | |
torch.optim.lr_scheduler.PolynomialLR.print_lr | No | |
torch.optim.lr_scheduler.PolynomialLR.state_dict | No | |
torch.optim.lr_scheduler.CosineAnnealingLR | | |
torch.optim.lr_scheduler.CosineAnnealingLR.get_last_lr | | |
torch.optim.lr_scheduler.CosineAnnealingLR.load_state_dict | | |
torch.optim.lr_scheduler.CosineAnnealingLR.print_lr | | |
torch.optim.lr_scheduler.CosineAnnealingLR.state_dict | | |
torch.optim.lr_scheduler.ChainedScheduler | | |
torch.optim.lr_scheduler.ChainedScheduler.get_last_lr | | |
torch.optim.lr_scheduler.ChainedScheduler.load_state_dict | | |
torch.optim.lr_scheduler.ChainedScheduler.print_lr | | |
torch.optim.lr_scheduler.ChainedScheduler.state_dict | | |
torch.optim.lr_scheduler.SequentialLR | | |
torch.optim.lr_scheduler.SequentialLR.get_last_lr | | |
torch.optim.lr_scheduler.SequentialLR.load_state_dict | | |
torch.optim.lr_scheduler.SequentialLR.print_lr | | |
torch.optim.lr_scheduler.SequentialLR.state_dict | | |
torch.optim.lr_scheduler.ReduceLROnPlateau | | |
torch.optim.lr_scheduler.CyclicLR | | |
torch.optim.lr_scheduler.CyclicLR.get_last_lr | | |
torch.optim.lr_scheduler.CyclicLR.get_lr | | |
torch.optim.lr_scheduler.CyclicLR.print_lr | | |
torch.optim.lr_scheduler.OneCycleLR | Yes | |
torch.optim.lr_scheduler.OneCycleLR.get_last_lr | | |
torch.optim.lr_scheduler.OneCycleLR.load_state_dict | | |
torch.optim.lr_scheduler.OneCycleLR.print_lr | | |
torch.optim.lr_scheduler.OneCycleLR.state_dict | | |
torch.optim.lr_scheduler.CosineAnnealingWarmRestarts | | |
torch.optim.lr_scheduler.CosineAnnealingWarmRestarts.get_last_lr | | |
torch.optim.lr_scheduler.CosineAnnealingWarmRestarts.load_state_dict | | |
torch.optim.lr_scheduler.CosineAnnealingWarmRestarts.print_lr | | |
torch.optim.lr_scheduler.CosineAnnealingWarmRestarts.state_dict | | |
torch.optim.lr_scheduler.CosineAnnealingWarmRestarts.step | | |
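
The sketch below exercises the entries marked "Yes" above: the base `Optimizer` methods (`add_param_group`, `zero_grad`, `step`, `state_dict`, `load_state_dict`) together with `torch.optim.lr_scheduler.LambdaLR`. It is standard PyTorch usage, not platform-specific guidance; the choice of `torch.optim.Adam` and the toy `nn.Linear` model are illustrative assumptions only, so substitute whichever optimizer and model your target backend actually supports per the table.

```python
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)  # placeholder optimizer

# Optimizer.add_param_group: register extra parameters with their own hyperparameters.
extra = nn.Linear(2, 2)
optimizer.add_param_group({"params": extra.parameters(), "lr": 1e-4})

# LambdaLR scales each param group's base lr by a user-defined factor per epoch.
scheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer, lr_lambda=lambda epoch: 0.95 ** epoch
)

for _ in range(3):
    optimizer.zero_grad()                       # Optimizer.zero_grad
    loss = model(torch.randn(8, 4)).sum()
    loss.backward()
    optimizer.step()                            # Optimizer.step
    scheduler.step()                            # advance the schedule once per epoch

# Optimizer.state_dict / Optimizer.load_state_dict: checkpoint and restore.
checkpoint = optimizer.state_dict()
optimizer.load_state_dict(checkpoint)
```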