UTILS#

Classes#

class models.tak_utils.utils.FisherLoader(fisher_cache, dataset_name, device, fp_precision='fp32')[source]#

Bases: object

load_diff_ekfac(task_id, only_counts=False)[source]#
Return type:

tuple[dict[str, Tensor], dict[str, Tensor], dict[str, Tensor], int, int]

load_ekfac(task_id, only_counts=False)[source]#
Return type:

tuple[dict[str, Tensor], dict[str, Tensor], dict[str, Tensor], int, int]

load_kfac(task_id: int, only_counts: Literal[True]) tuple[int, int][source]#
load_kfac(task_id: int, only_counts: Literal[False] = False) tuple[dict[str, Tensor], dict[str, Tensor], dict[str, Tensor], int, int]
store_kfac(task_id, ggT, aaT, ffT, num_ggT, num_aaT)[source]#
class models.tak_utils.utils.OptimizerBuilder(cmd_args)[source]#

Bases: object

build_opt_and_sched(all_params, num_batches)[source]#
build_opt_and_sched_multiple_lr(params_group_1, params_group_2, num_batches)[source]#

Functions#

models.tak_utils.utils.add_clip_args(parser)[source]#
models.tak_utils.utils.assign_learning_rate(param_group, new_lr)[source]#
models.tak_utils.utils.compute_acc_on_last_task(model, dataset)[source]#
models.tak_utils.utils.cosine_lr(optimizer, base_lrs, warmup_length, steps, min_lr)[source]#
models.tak_utils.utils.get_delta_w_backbone(named_params, delta_w, delta_w_names, training_type, device)[source]#
models.tak_utils.utils.get_delta_w_parameterlist(named_params, delta_w, delta_w_names, peft_type, device)[source]#
models.tak_utils.utils.get_parameter(shape, device, type_init='orto', transpose=False, requires_grad=True)[source]#
models.tak_utils.utils.get_params(net, features=True, classifier=False, offset_1=-1, offset_2=-1)[source]#
Return type:

Tensor

models.tak_utils.utils.make_psd(x, to64=False)[source]#
models.tak_utils.utils.replace_non_dynamically_quantizable_linear(module)[source]#

Recursively replace all NonDynamicallyQuantizableLinear layers with Linear layers in a model.

models.tak_utils.utils.set_params(net, new_params, features=True, classifier=False, offset_1=-1, offset_2=-1)[source]#
models.tak_utils.utils.set_requires_grad_to(model, namevars, mode)[source]#
models.tak_utils.utils.step_lr_decay(optimizer, base_lrs, warmup_length, steps)[source]#