# Runs inside the incremental-training loop; itera indexes the current batch of classes.
# Files to load: training samples of the new classes + stored exemplars of the old ones.
if itera == 0:
    files_from_cl = files_train[itera]
else:
    files_from_cl = files_train[itera][:]
    for i in range(itera * nb_cl):
        # Reducing number of exemplars of the previous classes so the total
        # memory budget stays fixed (see the budget sketch after this block)
        nb_protos_cl = int(np.ceil(nb_proto * nb_groups * 1. / itera))
        tmp_var = files_protoset[i]
        files_from_cl += tmp_var[0:min(len(tmp_var), nb_protos_cl)]

## Import the data reader ##
image_train, label_train = utils_data.read_data(train_path, labels_dic, mixing, files_from_cl=files_from_cl)
image_batch, label_batch_0 = tf.train.batch([image_train, label_train], batch_size=batch_size, num_threads=8)
label_batch = tf.one_hot(label_batch_0, nb_groups * nb_cl)

## Define the objective for the neural network ##
if itera == 0:
    # No distillation
    variables_graph, variables_graph2, scores, scores_stored = utils_icarl.prepare_networks(gpu, image_batch, nb_cl, nb_groups)

    # Define the objective for the neural network: 1 vs all cross_entropy
    with tf.device('/gpu:' + gpu):
        scores = tf.concat(scores, 0)
        l2_reg = wght_decay * tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope='ResNet18'))
        loss_class = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=label_batch, logits=scores))
        loss = loss_class + l2_reg
        learning_rate = tf.placeholder(tf.float32, shape=[])
        opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
        train_step = opt.minimize(loss, var_list=variables_graph)

if itera > 0:
    # Distillation
    variables_graph, variables_graph2, scores, scores_stored = utils_icarl.prepare_networks(gpu, image_batch, nb_cl, nb_groups)
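    # The distillation branch continues beyond this excerpt. For orientation, a
    # hedged sketch of how iCaRL's combined objective is typically assembled
    # (an illustration based on the iCaRL paper, not necessarily this
    # repository's exact code): the previous network's sigmoid outputs
    # (scores_stored) serve as soft targets for the old classes, while the
    # one-hot ground truth (label_batch) supplies targets for the new ones.
    #
    #     with tf.device('/gpu:' + gpu):
    #         scores = tf.concat(scores, 0)
    #         old_scores = tf.sigmoid(tf.concat(scores_stored, 0))  # previous net's outputs
    #         nb_old = itera * nb_cl                                # number of old classes
    #         targets = tf.concat([old_scores[:, :nb_old],          # distillation targets
    #                              label_batch[:, nb_old:]], 1)     # ground-truth targets
    #         loss_class = tf.reduce_mean(
    #             tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=scores))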
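# A minimal, self-contained sketch (a hypothetical helper, not part of the
# original code; assumes numpy is imported as np, as the code above does)
# illustrating the exemplar quota used at the top of this block: giving each of
# the itera*nb_cl old classes ceil(nb_proto*nb_groups/itera) exemplars keeps the
# total stored set near nb_proto * nb_groups * nb_cl images at every increment.
def _exemplar_budget_demo(nb_cl=10, nb_groups=10, nb_proto=20):
    # Hypothetical CIFAR-100-style setting: 10 increments of 10 classes,
    # 20 exemplars per class once all groups have been seen (budget ~2000).
    for it in range(1, nb_groups + 1):
        quota = int(np.ceil(nb_proto * nb_groups * 1. / it))  # per-class quota
        total = it * nb_cl * quota                            # exemplars stored overall
        print('after group %2d: %3d exemplars/class, ~%4d stored in total' % (it, quota, total))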