def minimodel(Inputs, feature_dropout=-1.):
    """Compact GravNet_simple model: six exchange/dense/GravNet blocks
    with dense skip connections, fed into ``output_block``.

    NOTE(review): ``feature_dropout`` is accepted but never used here —
    confirm whether it was meant to be passed to ``GravNet_simple``.
    """
    h = Inputs[0]  # this is the self.x list from the TrainData data structure
    energy_raw = SelectFeatures(0, 3)(h)
    h = BatchNormalization(momentum=0.6)(h)

    skip_feats = [h]
    for _ in range(6):
        # global exchange plus a dense stack in front of each GravNet block
        h = GlobalExchange()(h)
        h = Dense(64, activation='elu')(h)
        h = Dense(64, activation='elu')(h)
        h = BatchNormalization(momentum=0.6)(h)
        h = Dense(64, activation='elu')(h)
        h = GravNet_simple(n_neighbours=10,
                           n_dimensions=4,
                           n_filters=128,
                           n_propagate=64)(h)
        h = BatchNormalization(momentum=0.6)(h)
        # compressed per-block output kept as a skip connection
        skip_feats.append(Dense(32, activation='elu')(h))

    h = Concatenate()(skip_feats)
    h = Dense(64, activation='elu')(h)
    return Model(inputs=Inputs,
                 outputs=output_block(h, checkids(Inputs), energy_raw))
def gravnet(Inputs, nclasses, nregressions, otheroption=0):
    """Four-block GravNet classifier ending in a 2-channel softmax Conv1D.

    NOTE(review): ``nclasses``/``nregressions``/``otheroption`` are unused;
    the output width is hard-coded to 2 — confirm against the caller.
    """
    nfilters = 48
    nprop = 22
    ndims = 4

    h = Inputs[0]  # this is the self.x list from the TrainData data structure
    h = BatchNormalization(momentum=0.9)(h)

    collected = []
    for block in range(4):
        h = GlobalExchange()(h)
        h = Conv1D(64, 1, activation='elu')(h)
        h = Conv1D(64, 1, activation='elu')(h)
        h = Conv1D(64, 1, activation='tanh')(h)
        h = GravNet(n_neighbours=40,
                    n_dimensions=ndims,
                    n_filters=nfilters,
                    n_propagate=nprop,
                    name='GravNet0_' + str(block))(h)
        h = BatchNormalization(momentum=0.9)(h)
        collected.append(h)

    h = Concatenate()(collected)
    h = Conv1D(128, 1, activation='elu', name='last_conv_a')(h)
    h = Conv1D(3, 1, activation='elu', name='last_conv_b')(h)
    # two output channels: softmax over the class pair
    h = Conv1D(2, 1, activation='softmax', name="last_conv_output")(h)
    return Model(inputs=Inputs, outputs=[h])
def gravnet_model(Inputs, nclasses, nregressions, feature_dropout=0.1):
    """GravNet regression model with zero-mask propagation.

    A pre-GravNet stage is followed by ``n_gravnet_layers`` blocks of
    global exchange, dense layers and GravNet; the per-block features
    are concatenated for the regression head.  Learned GravNet
    coordinates and the eta feature are appended to the prediction.

    NOTE(review): reads module-level ``n_gravnet_layers``; several
    functions in this file are named ``gravnet_model``, so later
    definitions shadow this one.
    """
    h = Inputs[0]  # this is the self.x list from the TrainData data structure
    print('x', h.shape)

    all_coords = []
    etas = SelectFeatures(1, 2)(h)  # just to propagate eta to the prediction
    mask = CreateZeroMask(0)(h)

    h = BatchNormalization(momentum=0.9)(h)
    h = Multiply()([h, mask])
    h, c = GravNet(n_neighbours=40,
                   n_dimensions=4,
                   n_filters=80,
                   n_propagate=16,
                   name='gravnet_pre',
                   also_coordinates=True)(h)
    all_coords.append(c)
    h = BatchNormalization(momentum=0.9)(h)
    h = Multiply()([h, mask])

    block_feats = []
    for i in range(n_gravnet_layers):
        h = GlobalExchange()(h)
        h = Multiply()([h, mask])
        h = Dense(64, activation='tanh')(h)
        h = Dense(64, activation='tanh')(h)
        h = BatchNormalization(momentum=0.9)(h)
        h = Multiply()([h, mask])
        h = Dense(64, activation='sigmoid')(h)
        h = Multiply()([h, mask])
        h, c = GravNet(n_neighbours=40,
                       n_dimensions=4,
                       n_filters=80,
                       n_propagate=16,
                       name='gravnet_' + str(i),
                       also_coordinates=True,
                       feature_dropout=feature_dropout)(h)
        all_coords.append(c)
        h = BatchNormalization(momentum=0.9)(h)
        h = Multiply()([h, mask])
        block_feats.append(h)

    h = Concatenate()(block_feats)
    h = Dense(64, activation='elu', name='pre_last_correction')(h)
    h = BatchNormalization(momentum=0.9)(h)
    h = Multiply()([h, mask])
    h = Dense(nregressions, activation=None, kernel_initializer='zeros')(h)
    #x = Clip(-0.5, 1.5) (x)
    h = Multiply()([h, mask])
    #x = SortPredictionByEta(input_energy_index=0, input_eta_index=1)([x,Inputs[0]])
    h = Concatenate()([h] + all_coords + [etas])
    return Model(inputs=Inputs, outputs=[h])
def gravnet_model(Inputs, nclasses, nregressions, otheroption):
    """GravNet regression model variant: same masked block structure as
    the version above, but without the eta passthrough and with a small
    random-normal initializer on the final regression layer.

    NOTE(review): reads module-level ``n_gravnet_layers`` and ``keras``;
    this definition shadows the previous ``gravnet_model``.
    """
    h = Inputs[0]  # this is the self.x list from the TrainData data structure
    print('x', h.shape)

    all_coords = []
    mask = CreateZeroMask(0)(h)

    h = BatchNormalization(momentum=0.9)(h)
    h = Multiply()([h, mask])
    h, c = GravNet(n_neighbours=40,
                   n_dimensions=4,
                   n_filters=80,
                   n_propagate=16,
                   name='gravnet_pre',
                   also_coordinates=True)(h)
    all_coords.append(c)
    h = BatchNormalization(momentum=0.9)(h)
    h = Multiply()([h, mask])

    block_feats = []
    for i in range(n_gravnet_layers):
        h = GlobalExchange()(h)
        h = Multiply()([h, mask])
        h = Dense(64, activation='tanh')(h)
        h = Dense(64, activation='tanh')(h)
        h = BatchNormalization(momentum=0.9)(h)
        h = Multiply()([h, mask])
        h = Dense(64, activation='sigmoid')(h)
        h = Multiply()([h, mask])
        h, c = GravNet(n_neighbours=40,
                       n_dimensions=4,
                       n_filters=80,
                       n_propagate=16,
                       name='gravnet_' + str(i),
                       also_coordinates=True)(h)
        all_coords.append(c)
        h = BatchNormalization(momentum=0.9)(h)
        h = Multiply()([h, mask])
        block_feats.append(h)

    h = Concatenate()(block_feats)
    h = Dense(64, activation='elu', name='pre_last_correction')(h)
    h = BatchNormalization(momentum=0.9)(h)
    h = Multiply()([h, mask])
    # near-zero init so the regression output starts close to zero
    h = Dense(nregressions,
              activation=None,
              kernel_initializer=keras.initializers.RandomNormal(
                  mean=0.0, stddev=0.001))(h)
    #x = Clip(-0.5, 1.5) (x)
    h = Multiply()([h, mask])
    h = Concatenate()([h] + all_coords)
    return Model(inputs=Inputs, outputs=[h])
def gravnet_model(Inputs, nclasses, nregressions, otheroption):
    """Hybrid GarNet+GravNet regression model with a clipped output.

    NOTE(review): reads module-level ``n_gravnet_layers``; this
    definition shadows the earlier ``gravnet_model`` functions.
    """
    h = Inputs[0]  # this is the self.x list from the TrainData data structure
    print('x', h.shape)

    mask = CreateZeroMask(0)(h)
    h = BatchNormalization(momentum=0.6)(h)

    learned_coords = []
    block_feats = []
    for _ in range(n_gravnet_layers):
        h = GlobalExchange()(h)
        h = Multiply()([h, mask])
        h = Dense(64, activation='elu')(h)
        h = Dense(64, activation='elu')(h)
        h = BatchNormalization(momentum=0.3)(h)
        h = GarNet(n_aggregators=8, n_filters=64, n_propagate=16)(h)
        h = Dense(64, activation='tanh')(h)
        h = BatchNormalization(momentum=0.3)(h)
        h, c = GravNet(n_neighbours=40,
                       n_dimensions=4,
                       n_filters=64,
                       n_propagate=16,
                       also_coordinates=True)(h)
        learned_coords.append(c)
        h = BatchNormalization(momentum=0.3)(h)
        h = Multiply()([h, mask])
        block_feats.append(h)

    h = Concatenate()(block_feats)
    h = Dense(32, activation='elu', name='pre_last_correction')(h)
    h = Dense(nregressions,
              activation=None,
              kernel_initializer='zeros',
              use_bias=False)(h)
    # max 1 shower here
    h = Clip(-0.2, 1.2)(h)
    h = Concatenate()([h] + learned_coords)
    return Model(inputs=Inputs, outputs=[h])
def garnet_model(Inputs, nclasses, nregressions, otheroption):
    """Pure-GarNet regression model with zero-mask propagation.

    NOTE(review): reads module-level ``n_garnet_layers``.
    """
    nfilters = 48

    h = Inputs[0]  # this is the self.x list from the TrainData data structure
    print('x', h.shape)

    mask = CreateZeroMask(0)(h)
    h = BatchNormalization(momentum=0.6)(h)
    h = Multiply()([h, mask])

    collected = []
    for i in range(n_garnet_layers):
        h = GlobalExchange()(h)
        h = Multiply()([h, mask])
        h = Dense(nfilters, activation='tanh')(h)
        h = BatchNormalization(momentum=0.6)(h)
        h = Multiply()([h, mask])
        h = GarNet(n_aggregators=8,
                   n_filters=nfilters,
                   n_propagate=8,
                   name='garnet_' + str(i))(h)
        h = BatchNormalization(momentum=0.6)(h)
        h = Multiply()([h, mask])
        # for deep stacks (> 10 blocks) only keep every other block output
        if n_garnet_layers <= 10 or i % 2 == 0:
            collected.append(h)

    h = Concatenate()(collected)
    h = Dense(32, activation='tanh', name='pre_last_correction')(h)
    h = Dense(nregressions, activation=None, name='pre_last_correction_sm')(h)
    #x = SelectFeatures(1,nregressions+1)(x) #zero is the no-simcluster index
    #this cannot be used for the final trainings, but to get an idea of what is happening this might be useful
    #x = Clip(-0.5, 1.5) (x)
    #x = Concatenate()([x]+coords)
    return Model(inputs=Inputs, outputs=[h])
def gravnet_model(Inputs, nclasses, nregressions, feature_dropout=-1.):
    """Masked GravNet model predicting shower fractions plus a
    shower-count head from vertex-averaged features.

    Inputs are phi-centred, zero-masked and normalised; a fixed-space
    pre-GravNet stage is followed by ``n_gravnet_layers`` exchange/dense/
    GravNet blocks.  The prediction concatenates the fraction outputs,
    all learned GravNet coordinates, the shower-count estimate and the
    (eta, phi) features, masked at the end.

    NOTE(review): ``energy`` is selected but never used, and ``nclasses``
    is unused; this definition shadows the earlier ``gravnet_model``s.
    """
    learned_coords = []
    block_feats = []

    h = Inputs[0]  # this is the self.x list from the TrainData data structure
    h = CenterPhi(2)(h)
    mask = CreateZeroMask(0)(h)
    x_in = norm_and_mask(h, mask)

    # eta, phi, just to propagate to the prediction
    etas_phis = SelectFeatures(1, 3)(h)
    r_coordinate = SelectFeatures(4, 5)(h)
    energy = SelectFeatures(0, 1)(h)

    # just for the kernel initializer
    h = Concatenate()([etas_phis, r_coordinate, h])
    h = norm_and_mask(h, mask)

    h, c = GravNet(n_neighbours=40,
                   n_dimensions=3,
                   n_filters=80,
                   n_propagate=16,
                   name='gravnet_pre',
                   fix_coordinate_space=True,
                   also_coordinates=True,
                   masked_coordinate_offset=-10)([h, mask])
    h = norm_and_mask(h, mask)
    learned_coords.append(c)
    block_feats.append(h)

    for i in range(n_gravnet_layers):
        h = GlobalExchange()(h)
        h = Dense(64, activation='elu', name='dense_a_' + str(i))(h)
        h = norm_and_mask(h, mask)
        h = Dense(64, activation='elu', name='dense_b_' + str(i))(h)
        #x = Concatenate()([TransformCoordinates()(x),x])
        h = Dense(64, activation='elu', name='dense_c_' + str(i))(h)
        h = norm_and_mask(h, mask)
        # shift+activation makes it impossible to mix real with zero-pad
        h, c = GravNet(n_neighbours=40,
                       n_dimensions=4,
                       n_filters=80,
                       n_propagate=16,
                       name='gravnet_' + str(i),
                       also_coordinates=True,
                       feature_dropout=feature_dropout,
                       masked_coordinate_offset=-10)([h, mask])
        h = norm_and_mask(h, mask)
        learned_coords.append(c)
        block_feats.append(h)

    h = Concatenate()(block_feats)
    h = Dense(64, activation='elu', name='dense_a_last')(h)
    h = Dense(64, activation='elu', name='dense_b_last')(h)
    h = norm_and_mask(h, mask)
    h = Dense(64, activation='elu', name='dense_c_last')(h)
    h = norm_and_mask(h, mask)

    # shower-count head built from vertex-averaged features
    n_showers = AveragePoolVertices(keepdims=True)(h)
    n_showers = Dense(64, activation='elu', name='dense_n_showers_a')(n_showers)
    n_showers = Dense(1, activation=None, name='dense_n_showers')(n_showers)

    h = Dense(nregressions, activation=None, name='dense_pre_fracs')(h)
    h = Concatenate()([h, x_in])
    h = Dense(64, activation='elu', name='dense_last_correction')(h)
    h = Dense(nregressions,
              activation=None,
              name='dense_fracs',
              kernel_initializer=keras.initializers.RandomNormal(
                  mean=0.0, stddev=0.01))(h)

    h = Concatenate(name="concatlast", axis=-1)(
        [h] + learned_coords + [n_showers] + [etas_phis])
    h = Multiply()([h, mask])
    return Model(inputs=Inputs, outputs=[h])