def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple( "GRU", [ "input_size", "hidden_size", "num_layers", "bias", "batch_first", "dropout", "bidirectional", ], ) self.params = self.template_fn( input_size=IntParam(name="input_size", default=1), hidden_size=Param(name="hidden_size", default=None), num_layers=IntParam(name="num_layers", default=1), bias=BinaryParam(name="bias", default=True, true_prob=0.5), batch_first=BinaryParam(name="batch_first", default=False, true_prob=0.5), dropout=IntParam(name="dropout", default=0.0), bidirectional=BinaryParam(name="bidirectional", default=False, true_prob=0.5), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple( "SyncBatchNorm", [ "num_features", "eps", "momentum", "affine", "track_running_stats", "process_group", ], ) self.params = self.template_fn( num_features=Param(name="num_features", default=None), eps=FloatParam(name="eps", default=1e-05), momentum=FloatParam(name="momentum", default=0.1), affine=BinaryParam(name="affine", default=False, true_prob=0.5), track_running_stats=BinaryParam(name="track_running_stats", default=False, true_prob=0.5), process_group=Param(name="process_group", default=None), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple( "EmbeddingBag", [ "num_embeddings", "embedding_dim", "max_norm", "norm_type", "scale_grad_by_freq", "mode", "sparse", "_weight", ], ) self.params = self.template_fn( num_embeddings=IntParam(name="num_embeddings", default=1), embedding_dim=IntParam(name="embedding_dim", default=1), max_norm=Param(name="max_norm", default=None), norm_type=IntParam(name="norm_type", default=2.0), scale_grad_by_freq=BinaryParam(name="scale_grad_by_freq", default=False, true_prob=0.5), mode=ChoiceParam(name="mode", choices=("mean", ), cprobs=(1, ), default="mean"), sparse=BinaryParam(name="sparse", default=False, true_prob=0.5), _weight=Param(name="_weight", default=None), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple( "AvgPool1d", [ "kernel_size", "stride", "padding", "ceil_mode", "count_include_pad" ], ) self.params = self.template_fn( kernel_size=TupleParam(name="kernel_size", size=1, limits=((1, ), (1, )), default=(1, )), stride=TupleParam(name="stride", size=1, limits=((1, ), (1, )), default=(1, )), padding=TupleParam(name="padding", size=1, limits=((0, ), (0, )), default=(0, )), ceil_mode=BinaryParam(name="ceil_mode", default=False, true_prob=0.5), count_include_pad=BinaryParam(name="count_include_pad", default=True, true_prob=0.5), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple( "MultiheadAttention", [ "embed_dim", "num_heads", "dropout", "bias", "add_bias_kv", "add_zero_attn", "kdim", "vdim", ], ) self.params = self.template_fn( embed_dim=Param(name="embed_dim", default=None), num_heads=Param(name="num_heads", default=None), dropout=IntParam(name="dropout", default=0.0), bias=BinaryParam(name="bias", default=True, true_prob=0.5), add_bias_kv=BinaryParam(name="add_bias_kv", default=False, true_prob=0.5), add_zero_attn=BinaryParam(name="add_zero_attn", default=False, true_prob=0.5), kdim=Param(name="kdim", default=None), vdim=Param(name="vdim", default=None), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple("SELU", ["inplace"]) self.params = self.template_fn( inplace=BinaryParam(name="inplace", default=False, true_prob=0.5)) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple( "FractionalMaxPool3d", [ "kernel_size", "output_size", "output_ratio", "return_indices", "_random_samples", ], ) self.params = self.template_fn( kernel_size=TupleParam( name="kernel_size", size=3, limits=((1, 1, 1), (1, 1, 1)), default=(1, 1, 1), ), output_size=TupleParam(name="output_size", size=3, default=(1, 1, 1)), output_ratio=Param(name="output_ratio", default=None), return_indices=BinaryParam(name="return_indices", default=False, true_prob=0.5), _random_samples=Param(name="_random_samples", default=None), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple("PairwiseDistance", ["p", "eps", "keepdim"]) self.params = self.template_fn( p=IntParam(name="p", default=2.0), eps=FloatParam(name="eps", default=1e-06), keepdim=BinaryParam(name="keepdim", default=False, true_prob=0.5), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple( "MaxPool3d", [ "kernel_size", "stride", "padding", "dilation", "return_indices", "ceil_mode", ], ) self.params = self.template_fn( kernel_size=TupleParam( name="kernel_size", size=3, limits=((1, 1, 1), (1, 1, 1)), default=(1, 1, 1), ), stride=TupleParam(name="stride", size=3, limits=((1, 1, 1), (1, 1, 1)), default=(1, 1, 1)), padding=TupleParam(name="padding", size=3, limits=((0, 0, 0), (0, 0, 0)), default=(0, 0, 0)), dilation=TupleParam( name="dilation", size=3, limits=((1, 1, 1), (1, 1, 1)), default=(1, 1, 1), ), return_indices=BinaryParam(name="return_indices", default=False, true_prob=0.5), ceil_mode=BinaryParam(name="ceil_mode", default=False, true_prob=0.5), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
class LinearAC(LinearOnly):
    ac = ActivationParam()

    def __init__(
        self,
        start_shape,
        stop_shape,
        min_features=10,
        max_features=128,
        depth=2,
        bias_prob=0.0,
    ):
        LinearOnly.__init__(
            self, start_shape, stop_shape, min_features, max_features, depth, bias_prob
        )
        self.use_ac = BinaryParam(name="")
        self.use_ac.randomize(true_prob=0.7)

    def generate(self):
        limits = self.limits
        unit_list = []
        for j in range(self.depth):
            if j == 0:
                in_shape = self.start_shape
            else:
                in_shape = unit_list[-1].out_shape
            if j == self.depth - 1:
                out_shape = self.stop_shape
            else:
                out_shape = None
            unit_list.append(self.layers[0](in_shape, out_shape))
            if self.use_ac.value and out_shape is None:
                unit_list.append(
                    self.ac.val(unit_list[-1].out_shape, unit_list[-1].out_shape)
                )
            # Narrow the admissible out_features for the next layer to the
            # width just produced.
            self.layers[0].out_features.limits = (limits[0], unit_list[-1].out_shape[0])
        # Restore the saved limits once generation is done.
        self.layers[0].out_features.limits = limits
        return unit_list

    def __call__(self, num_nets, startnum=1):
        self.name = "Linear{}".format(self.ac.val.__class__.__name__)
        return _Net.__call__(self, num_nets, startnum)
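# A minimal, self-contained sketch of the Linear/activation interleaving that
# LinearAC.generate() produces, written with torch.nn directly rather than the
# factory classes above; ``linear_ac_stack`` and its arguments are hypothetical.
import torch.nn as nn


def linear_ac_stack(in_features, out_features, hidden=(64, 32), use_ac=True):
    sizes = [in_features, *hidden, out_features]
    layers = []
    for j in range(len(sizes) - 1):
        layers.append(nn.Linear(sizes[j], sizes[j + 1]))
        # Activation after every layer except the last, mirroring
        # ``if self.use_ac.value and out_shape is None`` in generate().
        if use_ac and j < len(sizes) - 2:
            layers.append(nn.ReLU())
    return nn.Sequential(*layers)


# e.g. linear_ac_stack(10, 2) -> Linear(10,64), ReLU, Linear(64,32), ReLU, Linear(32,2)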
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple("LeakyReLU", ["negative_slope", "inplace"]) self.params = self.template_fn( negative_slope=FloatParam(name="negative_slope", default=0.01), inplace=BinaryParam(name="inplace", default=False, true_prob=0.5), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple("RReLU", ["lower", "upper", "inplace"]) self.params = self.template_fn( lower=FloatParam(name="lower", default=0.125), upper=FloatParam(name="upper", default=0.3333333333333333), inplace=BinaryParam(name="inplace", default=False, true_prob=0.5), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
class ConvAC(ConvOnly):
    ac = ActivationParam()

    def __init__(self, start_shape, stop_shape, depth, convclass, bias_prob):
        ConvOnly.__init__(self, start_shape, stop_shape, depth, convclass, bias_prob)
        self.use_ac = BinaryParam(name="")
        self.use_ac.randomize(true_prob=0.7)

    def generate(self, resnet_cover=256):
        t = len(self.start_shape) - 1
        self.layers[0].kernel_size.randomize(
            limits=((1, ) * t, (min(list(self.start_shape[1:]) + [resnet_cover]), ) * t))
        limits = self.layers[0].kernel_size.limits
        unit_list = []
        for j in range(self.depth):
            if j == 0:
                in_shape = self.start_shape
            else:
                in_shape = unit_list[-1].out_shape
            if j == self.depth - 1:
                out_shape = self.stop_shape
            else:
                out_shape = None
            unit_list.append(self.layers[0](in_shape, out_shape))
            if self.use_ac.value and out_shape is None:
                unit_list.append(
                    self.ac.val(unit_list[-1].out_shape, unit_list[-1].out_shape))
            # Re-limit the kernel size so the next layer's kernel cannot exceed
            # the spatial extent of the shape just produced.
            out_shape = unit_list[-1].out_shape
            t = len(out_shape) - 1
            self.layers[0].kernel_size.randomize(
                limits=((1, ) * t, (min(out_shape[1:]), ) * t))
        # Restore the original kernel-size limits once generation is done.
        self.layers[0].kernel_size.randomize(limits=limits)
        return unit_list

    def __call__(self, num_nets, startnum=1):
        self.name = "{}".format(
            self.__class__.__name__.replace("AC", self.ac.val.__class__.__name__))
        return _Net.__call__(self, num_nets, startnum)
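# Sketch of the kernel-size capping that ConvAC.generate() applies before each
# layer: kernel dimensions are re-limited so they never exceed the smallest
# remaining spatial extent (or ``resnet_cover``). ``capped_conv2d`` and its
# arguments are hypothetical; the real factories draw sizes via TupleParam.
import random

import torch.nn as nn


def capped_conv2d(in_shape, out_channels, cover=256):
    # in_shape = (channels, height, width); cap the kernel at the smallest
    # spatial dimension and at ``cover``, as in the limits computed above.
    channels, *spatial = in_shape
    k = random.randint(1, min(min(spatial), cover))
    return nn.Conv2d(channels, out_channels, kernel_size=k)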
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple("Threshold", ["threshold", "value", "inplace"]) self.params = self.template_fn( threshold=Param(name="threshold", default=None), value=Param(name="value", default=None), inplace=BinaryParam(name="inplace", default=False, true_prob=0.5), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple("GRUCell", ["input_size", "hidden_size", "bias"]) self.params = self.template_fn( input_size=IntParam(name="input_size", default=1), hidden_size=Param(name="hidden_size", default=None), bias=BinaryParam(name="bias", default=True, true_prob=0.5), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple("Linear", ["in_features", "out_features", "bias"]) self.params = self.template_fn( in_features=IntParam(name="in_features", default=1), out_features=IntParam(name="out_features", default=1), bias=BinaryParam(name="bias", default=True, true_prob=0.5), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple( "GroupNorm", ["num_groups", "num_channels", "eps", "affine"]) self.params = self.template_fn( num_groups=Param(name="num_groups", default=None), num_channels=Param(name="num_channels", default=None), eps=FloatParam(name="eps", default=1e-05), affine=BinaryParam(name="affine", default=False, true_prob=0.5), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple("AdaptiveMaxPool2d", ["output_size", "return_indices"]) self.params = self.template_fn( output_size=TupleParam(name="output_size", size=2, default=(1, 1)), return_indices=BinaryParam(name="return_indices", default=False, true_prob=0.5), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple( "ConvTranspose3d", [ "in_channels", "out_channels", "kernel_size", "stride", "padding", "output_padding", "groups", "bias", "dilation", "padding_mode", ], ) self.params = self.template_fn( in_channels=IntParam(name="in_channels", default=1), out_channels=IntParam(name="out_channels", default=1), kernel_size=TupleParam( name="kernel_size", size=3, limits=((1, 1, 1), (1, 1, 1)), default=(1, 1, 1), ), stride=TupleParam( name="stride", size=3, limits=((1, 1, 1), (1, 1, 1)), default=(1, 1, 1) ), padding=TupleParam( name="padding", size=3, limits=((0, 0, 0), (0, 0, 0)), default=(0, 0, 0) ), output_padding=TupleParam( name="output_padding", size=3, limits=((0, 0, 0), (0, 0, 0)), default=(0, 0, 0), ), groups=IntParam(name="groups", default=1), bias=BinaryParam(name="bias", default=True, true_prob=0.5), dilation=TupleParam( name="dilation", size=3, limits=((1, 1, 1), (1, 1, 1)), default=(1, 1, 1), ), padding_mode=ChoiceParam( name="padding_mode", choices=("zeros",), cprobs=(1,), default="zeros" ), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple( "LayerNorm", ["normalized_shape", "eps", "elementwise_affine"]) self.params = self.template_fn( normalized_shape=Param(name="normalized_shape", default=None), eps=FloatParam(name="eps", default=1e-05), elementwise_affine=BinaryParam(name="elementwise_affine", default=False, true_prob=0.5), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple( "Hardtanh", ["min_val", "max_val", "inplace", "min_value", "max_value"]) self.params = self.template_fn( min_val=IntParam(name="min_val", default=-1.0), max_val=IntParam(name="max_val", default=1.0), inplace=BinaryParam(name="inplace", default=False, true_prob=0.5), min_value=Param(name="min_value", default=None), max_value=Param(name="max_value", default=None), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple( "AdaptiveLogSoftmaxWithLoss", ["in_features", "n_classes", "cutoffs", "div_value", "head_bias"], ) self.params = self.template_fn( in_features=IntParam(name="in_features", default=1), n_classes=Param(name="n_classes", default=None), cutoffs=Param(name="cutoffs", default=None), div_value=IntParam(name="div_value", default=4.0), head_bias=BinaryParam(name="head_bias", default=False, true_prob=0.5), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple( "RNNCell", ["input_size", "hidden_size", "bias", "nonlinearity"]) self.params = self.template_fn( input_size=IntParam(name="input_size", default=1), hidden_size=Param(name="hidden_size", default=None), bias=BinaryParam(name="bias", default=True, true_prob=0.5), nonlinearity=ChoiceParam(name="nonlinearity", choices=("tanh", ), cprobs=(1, ), default="tanh"), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple( "PoissonNLLLoss", [ "log_input", "full", "size_average", "eps", "reduce", "reduction" ], ) self.params = self.template_fn( log_input=BinaryParam(name="log_input", default=True, true_prob=0.5), full=BinaryParam(name="full", default=False, true_prob=0.5), size_average=Param(name="size_average", default=None), eps=FloatParam(name="eps", default=1e-08), reduce=Param(name="reduce", default=None), reduction=ChoiceParam(name="reduction", choices=("mean", ), cprobs=(1, ), default="mean"), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple( "LPPool2d", ["norm_type", "kernel_size", "stride", "ceil_mode"]) self.params = self.template_fn( norm_type=Param(name="norm_type", default=None), kernel_size=TupleParam(name="kernel_size", size=2, limits=((1, 1), (1, 1)), default=(1, 1)), stride=TupleParam(name="stride", size=2, limits=((1, 1), (1, 1)), default=(1, 1)), ceil_mode=BinaryParam(name="ceil_mode", default=False, true_prob=0.5), ) for k, v in kwargs.items(): getattr(self.params, k).val = v
def __init__(self, **kwargs): _Factory.__init__(self) self.template_fn = namedtuple( "TripletMarginLoss", [ "margin", "p", "eps", "swap", "size_average", "reduce", "reduction" ], ) self.params = self.template_fn( margin=IntParam(name="margin", default=1.0), p=IntParam(name="p", default=2.0), eps=FloatParam(name="eps", default=1e-06), swap=BinaryParam(name="swap", default=False, true_prob=0.5), size_average=Param(name="size_average", default=None), reduce=Param(name="reduce", default=None), reduction=ChoiceParam(name="reduction", choices=("mean", ), cprobs=(1, ), default="mean"), ) for k, v in kwargs.items(): getattr(self.params, k).val = v