def get_loss_weight_metrics_for_plot(self):
    """Collect the weight history of every loss for a single shared plot.

    Each successive curve is shifted up by a small constant offset so
    that losses with identical (or overlapping) weight trajectories stay
    visually distinguishable in the plot.

    Returns a 4-tuple ``([series], [1], [labels], ['lines'])`` — one
    plot panel containing all weight curves, sampled every iteration,
    rendered as line plots.
    """
    series = []
    labels = []
    vertical_offset = 0  # grows by 0.005 per curve purely for readability
    for loss_name, loss in self.losses.items():
        series.append(np.array(loss.weight_history) + vertical_offset)
        labels.append(loss_name)
        vertical_offset += 0.005
    return [series], [1], [labels], ['lines']
def get_metrics_for_plot(self):
    """Gather the plotting description of every registered metric.

    Returns ``(metrics, iters, names, types)`` where, per metric:
    the plot data (as produced by ``metric.get_data_for_plot()``), the
    sampling interval (always ``self.notify_every``), the metric's key
    in ``self.metrics``, and its ``plot_type``.
    """
    plot_data = []
    intervals = []
    labels = []
    plot_types = []
    for metric_name, metric in self.metrics.items():
        labels.append(metric_name)
        plot_data.append(metric.get_data_for_plot())
        plot_types.append(metric.plot_type)
        intervals.append(self.notify_every)
    return plot_data, intervals, labels, plot_types
def get_loss_metrics_for_plot(self, plot_organization):
    """Resolve a (possibly nested) plot layout into loss histories.

    ``plot_organization`` is a sequence of loss names; a nested tuple or
    list denotes a grouped sub-plot and is resolved recursively, keeping
    the nesting structure in the returned metrics/names.

    Returns ``(metrics, iters, names, types)``; every entry samples each
    iteration (``1``) and is drawn as a line plot.
    """
    metrics = []
    iters = []
    names = []
    for entry in plot_organization:
        if not isinstance(entry, (tuple, list)):
            # Plain loss name: take its recorded history directly.
            metrics.append(self.losses[entry].history)
            iters.append(1)
            names.append(entry)
        else:
            # Nested group: recurse and keep the sub-structure intact.
            sub_metrics, sub_iters, sub_names, _ = self.get_loss_metrics_for_plot(entry)
            metrics.append(sub_metrics)
            iters.append(sub_iters[0])
            names.append(sub_names)
    return metrics, iters, names, ['lines'] * len(metrics)
def configureModel(input, outputLen = len(RD['labels'])):
    """Build and compile the sequence model: a per-timestep dense
    projection followed by two stacked GRU stages and a dense output.

    ``input`` supplies the alphabet size via ``input.shape[2]``;
    ``outputLen`` is the number of predicted labels. Layer widths are
    ``300 * RG['ratios'][i]`` and training of the inner layers is gated
    by ``RP['trainable_inner']``. When ``RP['classify']`` is set, a
    final classification activation and an accuracy metric are added.

    Returns the compiled Keras ``Sequential`` model.
    """
    print(' Initializing and compiling...')
    alphaSize = input.shape[2]

    model = Sequential()
    # Per-timestep projection of the one-hot alphabet into a dense space.
    model.add(TimeDistributed(
        Dense(300 * RG['ratios'][0], activation = 'tanh',
              trainable = RP['trainable_inner']),
        input_shape = (None, alphaSize)))
    model.add(Dropout(0.30))

    # Two recurrent stages: the first emits the full sequence so the
    # second can consume it; the second emits only its final state.
    for ratio_index, emit_sequence in ((1, True), (2, False)):
        model.add(GRU(300 * RG['ratios'][ratio_index],
                      trainable = RP['trainable_inner'],
                      return_sequences = emit_sequence))
        model.add(Activation('tanh', trainable = RP['trainable_inner']))
        model.add(Dropout(0.30))

    model.add(Dense(outputLen))
    if RP['classify']:
        model.add(Activation(RP['classify_activation'],
                             trainable = RP['trainable_inner']))

    metrics = ['accuracy'] if RP['classify'] else []
    model.compile(loss = RP['objective'], optimizer = OPTIMIZER,
                  metrics = metrics)
    print(' ...done')
    return model
def configureEdgeModel(inputSmiles, inputFasta):
    """Build and compile the two-branch (edge) model.

    Two structurally identical GRU encoders — one for the SMILES input,
    one for the FASTA input — are merged by concatenation and fed into a
    dense classification/regression head sized by ``len(RD['labels'])``.
    Inner-layer trainability is gated by ``RP['trainable_inner']``; when
    ``RP['classify']`` is set, a final classification activation and an
    accuracy metric are added.

    Returns the compiled merged Keras ``Sequential`` model.
    """
    print(' Initializing edge model and compiling...')

    def buildSequenceBranch(inputShape):
        # Shared encoder architecture: per-timestep dense projection,
        # then two GRU stages (the first returns sequences, the second
        # only its final state), each followed by tanh and dropout.
        branch = Sequential()
        branch.add(TimeDistributed(
            Dense(300, activation = 'tanh',
                  trainable = RP['trainable_inner']),
            input_shape = inputShape))
        branch.add(Dropout(0.30))
        branch.add(GRU(300, trainable = RP['trainable_inner'],
                       return_sequences = True))
        branch.add(Activation('tanh', trainable = RP['trainable_inner']))
        branch.add(Dropout(0.30))
        branch.add(GRU(300, trainable = RP['trainable_inner']))
        branch.add(Activation('tanh', trainable = RP['trainable_inner']))
        return branch

    smilesModel = buildSequenceBranch((None, inputSmiles.shape[2]))
    fastaModel = buildSequenceBranch((None, inputFasta.shape[2]))
    merged = Merge([smilesModel, fastaModel], mode='concat')

    mergedModel = Sequential()
    mergedModel.add(merged)
    # Two dense relu/dropout stages on top of the concatenated encodings.
    for _ in range(2):
        mergedModel.add(Dense(300))
        mergedModel.add(Activation('relu'))
        mergedModel.add(Dropout(0.3))
    mergedModel.add(Dense(len(RD['labels'])))
    if RP['classify']:
        mergedModel.add(Activation(RP['classify_activation'],
                                   trainable = RP['trainable_inner']))

    metrics = ['accuracy'] if RP['classify'] else []
    mergedModel.compile(loss = RP['objective'], optimizer = OPTIMIZER,
                        metrics = metrics)
    print(' ...done')
    return mergedModel