def __init__(self, branch_units,
             activation=Rectlin(),
             bias_init=UniformInit(low=-0.08, high=0.08),
             filter_init=XavierInit()):
    (p1, p2, p3, p4) = branch_units

    # Branch 1: a single 1x1 convolution
    self.branch_1 = Convolution((1, 1, p1[0]), activation=activation,
                                bias_init=bias_init, filter_init=filter_init)

    # Branch 2: 1x1 reduction followed by a 3x3 convolution
    self.branch_2 = [Convolution((1, 1, p2[0]), activation=activation,
                                 bias_init=bias_init, filter_init=filter_init),
                     Convolution((3, 3, p2[1]), activation=activation,
                                 bias_init=bias_init, filter_init=filter_init,
                                 padding=1)]

    # Branch 3: 1x1 reduction followed by a 5x5 convolution
    self.branch_3 = [Convolution((1, 1, p3[0]), activation=activation,
                                 bias_init=bias_init, filter_init=filter_init),
                     Convolution((5, 5, p3[1]), activation=activation,
                                 bias_init=bias_init, filter_init=filter_init,
                                 padding=2)]

    # Branch 4: 3x3 max pooling followed by a 1x1 projection of width p4[0]
    self.branch_4 = [Pooling(pool_shape=(3, 3), padding=1, strides=1,
                             pool_type="max"),
                     Convolution((1, 1, p4[0]), activation=activation,
                                 bias_init=bias_init, filter_init=filter_init)]
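A forward pass applies every branch to the same input and collects the four outputs that are concatenated below. The module's forward method is not part of this snippet; the following is a minimal sketch (the __call__ name and the chaining of each branch's layer list are assumptions):

def __call__(self, in_obj):
    # Sketch: apply each branch to the same input tensor.
    branch_1_output = self.branch_1(in_obj)

    branch_2_output = in_obj
    for layer in self.branch_2:   # 1x1 reduction, then 3x3 convolution
        branch_2_output = layer(branch_2_output)

    branch_3_output = in_obj
    for layer in self.branch_3:   # 1x1 reduction, then 5x5 convolution
        branch_3_output = layer(branch_3_output)

    branch_4_output = in_obj
    for layer in self.branch_4:   # 3x3 max pool, then 1x1 projection
        branch_4_output = layer(branch_4_output)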
    outputs = [branch_1_output, branch_2_output,
               branch_3_output, branch_4_output]
    # This does the equivalent of neon's merge-broadcast: concatenate the
    # four branch outputs along the channel axis.
    return ng.concat_along_axis(outputs, branch_1_output.axes.channel_axis())


seq1 = Sequential([
    Convolution((7, 7, 64), padding=3, strides=2, activation=Rectlin(),
                bias_init=bias_init, filter_init=XavierInit()),
    Pooling(pool_shape=(3, 3), padding=1, strides=2, pool_type='max'),
    Convolution((1, 1, 64), activation=Rectlin(),
                bias_init=bias_init, filter_init=XavierInit()),
    Convolution((3, 3, 192), activation=Rectlin(),
                bias_init=bias_init, filter_init=XavierInit(), padding=1),
    Pooling(pool_shape=(3, 3), padding=1, strides=2, pool_type='max'),
    Inception([(64,), (96, 128), (16, 32), (32,)]),
    Inception([(128,), (128, 192), (32, 96), (64,)]),
    Pooling(pool_shape=(3, 3), padding=1, strides=2, pool_type='max'),
    Inception([(192,), (96, 208), (16, 48), (64,)]),
    Inception([(160,), (112, 224), (24, 64), (64,)]),
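Because the branch outputs are concatenated along the channel axis, a module's output depth is simply the sum of the four branch depths. For the first Inception block above, Inception([(64,), (96, 128), (16, 32), (32,)]), that works out as:

# 1x1 branch + 3x3 branch + 5x5 branch + pool-projection branch
assert 64 + 128 + 32 + 32 == 256   # output feature maps of the block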
def __init__(self, net_type, resnet_size, bottleneck, num_resnet_mods,
             batch_norm=True):
    # For the CIFAR10/CIFAR100 datasets
    if net_type in ('cifar10', 'cifar100'):
        # Number of filters at each stage
        num_fils = [16, 32, 64]
        # Network layers
        layers = [
            # Subtract the mean, as suggested in the paper
            Preprocess(functor=cifar10_mean_subtract),
            # First conv layer: 3x3, stride 1
            Convolution(**conv_params(3, 16, batch_norm=batch_norm))]
        first_resmod = True  # Indicates the first residual module
        # Loop over the three filter counts.
        for fil in range(3):
            # Lay out n residual modules so that we have 2n layers.
            for resmods in range(num_resnet_mods):
                if resmods == 0:
                    if first_resmod:
                        # Strides=1 and convolution side path
                        main_path, side_path = self.get_mp_sp(
                            num_fils[fil], net_type, direct=False,
                            batch_norm=batch_norm)
                        layers.append(ResidualModule(main_path, side_path))
                        layers.append(Activation(Rectlin()))
                        first_resmod = False
                    else:
                        # Strides=2 and convolution side path
                        main_path, side_path = self.get_mp_sp(
                            num_fils[fil], net_type, direct=False, strides=2,
                            batch_norm=batch_norm)
                        layers.append(ResidualModule(main_path, side_path))
                        layers.append(Activation(Rectlin()))
                else:
                    # Strides=1 and direct connection
                    main_path, side_path = self.get_mp_sp(
                        num_fils[fil], net_type, batch_norm=batch_norm)
                    layers.append(ResidualModule(main_path, side_path))
                    layers.append(Activation(Rectlin()))
        # Average pooling --> fully connected --> softmax
        layers.append(Pooling((8, 8), pool_type='avg'))
        layers.append(Affine(axes=ax.Y, weight_init=KaimingInit(),
                             batch_norm=batch_norm))
        layers.append(Activation(Softmax()))
    # For the I1K (ImageNet) datasets
    elif net_type in ('i1k', 'i1k100'):
        # Number of filters at each stage
        num_fils = [64, 128, 256, 512]
        # Number of residual modules to instantiate at each level
        num_resnet_mods = num_i1k_resmods(resnet_size)
        # Network layers
        layers = [
            # Subtract the mean
            Preprocess(functor=i1k_mean_subtract),
            # First conv layer: 7x7, stride 2
            Convolution((7, 7, 64), strides=2, padding=3,
                        batch_norm=batch_norm, activation=Rectlin(),
                        filter_init=KaimingInit()),
            # Max pooling
            Pooling((3, 3), strides=2, pool_type='max', padding=1)]
        first_resmod = True  # The first residual module uses strides=1
        # Loop over the four filter counts.
        for fil in range(4):
            # Lay out residual modules as given by the num_resnet_mods list.
            for resmods in range(num_resnet_mods[fil]):
                if resmods == 0:
                    if first_resmod:
                        # Strides=1 and convolution side path
                        main_path, side_path = self.get_mp_sp(
                            num_fils[fil], net_type, direct=False,
                            bottleneck=bottleneck, batch_norm=batch_norm)
                        layers.append(ResidualModule(main_path, side_path))
                        layers.append(Activation(Rectlin()))
                        first_resmod = False
                    else:
                        # Strides=2 and convolution side path
                        main_path, side_path = self.get_mp_sp(
                            num_fils[fil], net_type, direct=False,
                            bottleneck=bottleneck, strides=2,
                            batch_norm=batch_norm)
                        layers.append(ResidualModule(main_path, side_path))
                        layers.append(Activation(Rectlin()))
                else:
                    # Strides=1 and direct connection
                    main_path, side_path = self.get_mp_sp(
                        num_fils[fil], net_type, bottleneck=bottleneck,
                        batch_norm=batch_norm)
                    layers.append(ResidualModule(main_path, side_path))
                    layers.append(Activation(Rectlin()))
        # Average pooling --> fully connected --> softmax
        layers.append(Pooling((7, 7), pool_type='avg'))
        layers.append(Affine(axes=ax.Y, weight_init=KaimingInit(),
                             batch_norm=batch_norm))
        layers.append(Activation(Softmax()))
    else:
        raise NameError(
            "Incorrect dataset. Should be --dataset cifar10 or --dataset i1k")
    super(BuildResnet, self).__init__(layers=layers)
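The builder relies on two helpers that are not shown in this snippet. A plausible sketch, under the assumption that conv_params bundles keyword arguments for Convolution (it is unpacked with ** above) and that num_i1k_resmods returns the standard per-stage module counts from He et al. (2015):

def conv_params(fil_size, num_fils, strides=1, batch_norm=True,
                activation=Rectlin()):
    # Assumed helper: pack common Convolution keyword arguments.
    return dict(filter_shape=(fil_size, fil_size, num_fils),
                filter_init=KaimingInit(),
                strides=strides,
                padding=(1 if fil_size > 1 else 0),
                batch_norm=batch_norm,
                activation=activation)


def num_i1k_resmods(resnet_size):
    # Assumed helper: residual modules per stage for ImageNet ResNets,
    # following the standard depths (18/34/50/101/152).
    size_to_mods = {18: [2, 2, 2, 2],
                    34: [3, 4, 6, 3],
                    50: [3, 4, 6, 3],
                    101: [3, 4, 23, 3],
                    152: [3, 8, 36, 3]}
    return size_to_mods[resnet_size]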
def cifar_mean_subtract(x):
    # Subtract the per-channel BGR mean, then scale pixels to [0, 1].
    bgr_mean = ng.constant(const=np.array([104., 119., 127.]),
                           axes=[x.axes.channel_axis()])
    return (x - bgr_mean) / 255.


init_uni = UniformInit(-0.1, 0.1)

seq1 = Sequential([Preprocess(functor=cifar_mean_subtract),
                   Convolution((5, 5, 16), filter_init=init_uni,
                               activation=Rectlin(),
                               batch_norm=args.use_batch_norm),
                   Pooling((2, 2), strides=2),
                   Convolution((5, 5, 32), filter_init=init_uni,
                               activation=Rectlin(),
                               batch_norm=args.use_batch_norm),
                   Pooling((2, 2), strides=2),
                   Affine(nout=500, weight_init=init_uni,
                          activation=Rectlin(),
                          batch_norm=args.use_batch_norm),
                   Affine(axes=ax.Y, weight_init=init_uni,
                          activation=Softmax())])

optimizer = GradientDescentMomentum(0.01, 0.9)
train_prob = seq1(inputs['image'])
train_loss = ng.cross_entropy_multi(train_prob,
                                    ng.one_hot(inputs['label'], axis=ax.Y))
# Apply the optimizer update, then report the mean loss for the batch.
batch_cost = ng.sequential([optimizer(train_loss),
                            ng.mean(train_loss, out_axes=())])
train_outputs = dict(batch_cost=batch_cost)

with Layer.inference_mode_on():
    inference_prob = seq1(inputs['image'])
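The inference-mode output is typically wired into an evaluation graph built from the same placeholders. A minimal sketch, assuming ngraph's argmax/not_equal ops for the misclassification indicator (the eval_outputs dict and its key names are assumptions):

# Per-example misclassification indicator and evaluation loss.
errors = ng.not_equal(ng.argmax(inference_prob, out_axes=[ax.N]),
                      inputs['label'])
eval_loss = ng.cross_entropy_multi(inference_prob,
                                   ng.one_hot(inputs['label'], axis=ax.Y))
eval_outputs = dict(cross_ent_loss=eval_loss, misclass_pct=errors)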
              'label': {'data': y_train, 'axes': ('N',)}}
train_set = ArrayIterator(train_data, batch_size=args.batch_size,
                          total_iterations=args.num_iterations)
inputs = train_set.make_placeholders(include_iteration=True)
ax.Y.length = 1000  # Number of outputs of the last layer

# Weight initialization
init = UniformInit(low=-0.08, high=0.08)

# Set up the model
seq1 = Sequential([Convolution((11, 11, 64),
                               filter_init=GaussianInit(std=0.01),
                               bias_init=init, activation=Rectlin(),
                               padding=3, strides=4),
                   Pooling((3, 3), strides=2),
                   Convolution((5, 5, 192),
                               filter_init=GaussianInit(std=0.01),
                               bias_init=init, activation=Rectlin(),
                               padding=2),
                   Pooling((3, 3), strides=2),
                   Convolution((3, 3, 384),
                               filter_init=GaussianInit(std=0.03),
                               bias_init=init, activation=Rectlin(),
                               padding=1),
                   Convolution((3, 3, 256),
                               filter_init=GaussianInit(std=0.03),
                               bias_init=init, activation=Rectlin(),
                               padding=1),
                   Convolution((3, 3, 256),
                               filter_init=GaussianInit(std=0.03),
                               bias_init=init, activation=Rectlin(),
                               padding=1),
                   Pooling((3, 3), strides=2),
                   Affine(nout=4096, weight_init=GaussianInit(std=0.01),