# UniRepLKNet: A Universal Perception Large-Kernel ConvNet for Audio, Video, Point Cloud, Time-Series and Image Recognition
# Github source: https://github.com/AILab-CVC/UniRepLKNet
# Licensed under The Apache License 2.0 License [see LICENSE for details]
# Based on RepLKNet, ConvNeXt, timm, DINO and DeiT code bases
# https://github.com/DingXiaoH/RepLKNet-pytorch
# https://github.com/facebookresearch/ConvNeXt
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import torch
from torch import optim as optim

from timm.optim.adafactor import Adafactor
from timm.optim.adahessian import Adahessian
from timm.optim.adamp import AdamP
from timm.optim.lookahead import Lookahead
from timm.optim.nadam import Nadam
from timm.optim.radam import RAdam
from timm.optim.rmsprop_tf import RMSpropTF
from timm.optim.sgdp import SGDP

import json

try:
    from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
    has_apex = True
except ImportError:
    has_apex = False

def get_num_layer_for_convnext(var_name):
    """
    Divide [3, 3, 27, 3] layers into 12 groups; each group is three
    consecutive blocks, including possible neighboring downsample layers;
    adapted from https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py
    """
    num_max_layer = 12
    if var_name.startswith("downsample_layers"):
        stage_id = int(var_name.split('.')[1])
        if stage_id == 0:
            layer_id = 0
        elif stage_id == 1 or stage_id == 2:
            layer_id = stage_id + 1
        elif stage_id == 3:
            layer_id = 12
        return layer_id
    elif var_name.startswith("stages"):
        stage_id = int(var_name.split('.')[1])
        block_id = int(var_name.split('.')[2])
        if stage_id == 0 or stage_id == 1:
            layer_id = stage_id + 1
        elif stage_id == 2:
            layer_id = 3 + block_id // 3
        elif stage_id == 3:
            layer_id = 12
        return layer_id
    else:
        return num_max_layer + 1
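
# Illustrative mapping (a sketch, not part of the original file): layer ids that
# get_num_layer_for_convnext assigns to a few hypothetical parameter names,
# assuming the ConvNeXt-style naming scheme ("downsample_layers.<stage>..." and
# "stages.<stage>.<block>...") that the function parses:
#
#   get_num_layer_for_convnext("downsample_layers.0.0.weight")  -> 0   (stem)
#   get_num_layer_for_convnext("downsample_layers.2.1.weight")  -> 3
#   get_num_layer_for_convnext("stages.0.2.pwconv1.weight")     -> 1
#   get_num_layer_for_convnext("stages.2.13.dwconv.weight")     -> 7   (3 + 13 // 3)
#   get_num_layer_for_convnext("stages.3.1.norm.weight")        -> 12
#   get_num_layer_for_convnext("head.weight")                   -> 13  (num_max_layer + 1)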

class LayerDecayValueAssigner(object):

    def __init__(self, values):
        self.values = values

    def get_scale(self, layer_id):
        return self.values[layer_id]

    def get_layer_id(self, var_name):
        return get_num_layer_for_convnext(var_name)
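
# A minimal construction sketch (not part of the original file): `values` is
# expected to hold one lr scale per layer id in [0, num_max_layer + 1]. A common
# ConvNeXt/BEiT-style choice is geometric layer-wise decay; the 0.8 factor below
# is a hypothetical value:
#
#   num_layers = 12
#   assigner = LayerDecayValueAssigner(
#       [0.8 ** (num_layers + 1 - i) for i in range(num_layers + 2)]
#   )
#
# so shallower layers (smaller layer ids) receive smaller learning-rate scales
# than deeper ones, and the head (layer id 13) keeps a scale of 1.0.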

def get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None):
    parameter_group_names = {}
    parameter_group_vars = {}

    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights
        if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
            group_name = "no_decay"
            this_weight_decay = 0.
        else:
            group_name = "decay"
            this_weight_decay = weight_decay
        if get_num_layer is not None:
            layer_id = get_num_layer(name)
            group_name = "layer_%d_%s" % (layer_id, group_name)
        else:
            layer_id = None

        if group_name not in parameter_group_names:
            if get_layer_scale is not None:
                scale = get_layer_scale(layer_id)
            else:
                scale = 1.

            parameter_group_names[group_name] = {
                "weight_decay": this_weight_decay,
                "params": [],
                "lr_scale": scale
            }
            parameter_group_vars[group_name] = {
                "weight_decay": this_weight_decay,
                "params": [],
                "lr_scale": scale
            }

        parameter_group_vars[group_name]["params"].append(param)
        parameter_group_names[group_name]["params"].append(name)

    print("Param groups = %s" % json.dumps(parameter_group_names, indent=2))
    return list(parameter_group_vars.values())
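
# Illustrative result (a sketch, not part of the original file): with a layer-id
# function and per-layer scales supplied, each returned group looks roughly like
#
#   {"weight_decay": 0.05, "params": [<tensors>], "lr_scale": 0.41}  # e.g. "layer_9_decay"
#   {"weight_decay": 0.0,  "params": [<tensors>], "lr_scale": 0.41}  # e.g. "layer_9_no_decay"
#
# where biases and 1-D parameters (norm weights, layer-scale gammas) fall into the
# "no_decay" groups; the numeric values shown here are hypothetical.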

def create_optimizer(model, weight_decay, lr, opt, get_num_layer=None, opt_eps=None, opt_betas=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None, momentum=0.9):
    opt_lower = opt.lower()
    # if weight_decay and filter_bias_and_bn:
    if filter_bias_and_bn:
        skip = {}
        if skip_list is not None:
            skip = skip_list
        elif hasattr(model, 'no_weight_decay'):
            skip = model.no_weight_decay()
        parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale)
        weight_decay = 0.
    else:
        parameters = model.parameters()

    if 'fused' in opt_lower:
        assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'

    opt_args = dict(lr=lr, weight_decay=weight_decay)
    if opt_eps is not None:
        opt_args['eps'] = opt_eps
    if opt_betas is not None:
        opt_args['betas'] = opt_betas

    opt_split = opt_lower.split('_')
    opt_lower = opt_split[-1]
    if opt_lower == 'sgd' or opt_lower == 'nesterov':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'momentum':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args)
    elif opt_lower == 'adam':
        optimizer = optim.Adam(parameters, **opt_args)
    elif opt_lower == 'adamw':
        optimizer = optim.AdamW(parameters, **opt_args)
    elif opt_lower == 'nadam':
        optimizer = Nadam(parameters, **opt_args)
    elif opt_lower == 'radam':
        optimizer = RAdam(parameters, **opt_args)
    elif opt_lower == 'adamp':
        optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
    elif opt_lower == 'sgdp':
        optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'adadelta':
        optimizer = optim.Adadelta(parameters, **opt_args)
    elif opt_lower == 'adahessian':
        optimizer = Adahessian(parameters, **opt_args)
    elif opt_lower == 'rmsprop':
        optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=momentum, **opt_args)
    elif opt_lower == 'rmsproptf':
        optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args)
    elif opt_lower == 'fusedsgd':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'fusedmomentum':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args)
    elif opt_lower == 'fusedadam':
        optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
    elif opt_lower == 'fusedadamw':
        optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
    elif opt_lower == 'fusedlamb':
        optimizer = FusedLAMB(parameters, **opt_args)
    elif opt_lower == 'fusednovograd':
        opt_args.setdefault('betas', (0.95, 0.98))
        optimizer = FusedNovoGrad(parameters, **opt_args)
    else:
        assert False, "Invalid optimizer"
    if len(opt_split) > 1:
        if opt_split[0] == 'lookahead':
            optimizer = Lookahead(optimizer)

    return optimizer
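
# A minimal, self-contained usage sketch (not part of the original file). The tiny
# model below is a hypothetical stand-in; a real training script would pass its
# UniRepLKNet instance and its own hyper-parameter values instead.
if __name__ == "__main__":
    import torch.nn as nn

    demo_model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8), nn.Linear(8, 4))
    num_layers = 12
    assigner = LayerDecayValueAssigner(
        [0.8 ** (num_layers + 1 - i) for i in range(num_layers + 2)]
    )
    demo_optimizer = create_optimizer(
        demo_model, weight_decay=0.05, lr=4e-3, opt='adamw',
        get_num_layer=assigner.get_layer_id,
        get_layer_scale=assigner.get_scale,
    )
    # Each param group carries an "lr_scale" entry; ConvNeXt-style training loops
    # typically multiply it into the scheduled learning rate for that group.
    print(type(demo_optimizer).__name__)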