Example #1
        # files_protoset is a list of 1000 lists; each list holds the exemplar paths of one class
        files_from_cl = files_train[itera][:]
        for i in range(itera * nb_cl):  # for all the previous classes
            # nb_protos_cl = int( np.ceil(nb_proto * nb_groups * 1. / itera))
            # for the current batch:
            nb_protos_cl = int(math.ceil(nb_proto / (nb_cl * itera)))
            tmp_var = files_protoset[i]  # retrieve the exemplar list of this class
            files_from_cl += tmp_var[0:min(len(tmp_var), nb_protos_cl)]  # add the available exemplar paths to the current training list

    # files_from_cl now contains the paths of new + old data (imbalanced)

    ## Import the data reader ##
    image_train, label_train = utils_data.read_data(
        files_from_cl=files_from_cl)
    image_batch, label_batch_0 = tf.train.batch([image_train, label_train],
                                                batch_size=batch_size,
                                                num_threads=8)
    label_batch = tf.one_hot(label_batch_0, nb_groups * nb_cl)

    ## Define the objective for the neural network ##
    if itera == 0:
        # No distillation
        variables_graph, variables_graph2, scores, scores_stored = utils_icarl.prepare_networks(
            images_mean, device, image_batch, nb_cl, nb_groups)

        # Define the objective for the neural network: 1 vs all cross_entropy
        with tf.device(device):
            scores = tf.concat(scores, 0)
            l2_reg = wght_decay * tf.reduce_sum(
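
The quota in this example splits a single total budget, nb_proto, evenly over the nb_cl * itera classes seen so far, so each old class keeps fewer exemplars as new class batches arrive. A minimal sketch of that arithmetic (the values of nb_proto and nb_cl are hypothetical; Python 3 true division is assumed, as in the snippet):

import math

nb_proto = 2000  # hypothetical total exemplar budget
nb_cl = 10       # hypothetical number of classes per batch

# The per-class quota shrinks as the number of seen classes (nb_cl * itera) grows.
for itera in range(1, 5):
    nb_protos_cl = int(math.ceil(nb_proto / (nb_cl * itera)))
    print(itera, nb_protos_cl)  # 1 -> 200, 2 -> 100, 3 -> 67, 4 -> 50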
Example #2
    print('Batch of classes number {0} arrives ...'.format(itera + 1))

    # Adding the stored exemplars to the training set
    if itera == 0:
        files_from_cl = files_train[itera]
    else:
        files_from_cl = files_train[itera][:]
        for i in range(itera * nb_cl):
            nb_protos_cl = int(np.ceil(nb_proto * nb_groups * 1. / itera))  # reducing the number of exemplars of the previous classes
            tmp_var = files_protoset[i]
            files_from_cl += tmp_var[0:min(len(tmp_var), nb_protos_cl)]

    ## Import the data reader ##
    image_train, label_train = utils_data.read_data(
        train_path, labels_dic, mixing, files_from_cl=files_from_cl)
    image_batch, label_batch_0 = tf.train.batch([image_train, label_train],
                                                batch_size=batch_size,
                                                num_threads=8)
    label_batch = tf.one_hot(label_batch_0, nb_groups * nb_cl)

    ## Define the objective for the neural network ##
    if itera == 0:
        # No distillation
        variables_graph, variables_graph2, scores, scores_stored = utils_icarl.prepare_networks(
            gpu, image_batch, nb_cl, nb_groups)

        # Define the objective for the neural network: 1 vs all cross_entropy
        with tf.device('/gpu:' + gpu):
            scores = tf.concat(scores, 0)
            l2_reg = wght_decay * tf.reduce_sum(
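
Unlike Example #1, this quota is ceil(nb_proto * nb_groups / itera), which is the same "fixed total memory split over the seen classes" policy expressed with a per-class parameter: with K = nb_proto * nb_cl * nb_groups exemplars in total, K / (nb_cl * itera) = nb_proto * nb_groups / itera. A minimal sketch checking that equivalence (all values hypothetical):

import numpy as np

nb_proto, nb_cl, nb_groups = 20, 10, 10  # hypothetical iCaRL-style settings
K = nb_proto * nb_cl * nb_groups         # fixed total memory: 2000 exemplars

for itera in range(1, nb_groups):
    nb_protos_cl = int(np.ceil(nb_proto * nb_groups * 1. / itera))
    assert nb_protos_cl == int(np.ceil(K * 1. / (nb_cl * itera)))
    print(itera, nb_protos_cl)  # 200, 100, 67, 50, ... kept per previous class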
Example #3
for itera in range(nb_groups):
  
  # Files to load : training samples + protoset
  print('Batch of classes number {0} arrives ...'.format(itera+1))
  # Adding the stored exemplars to the training set
  if itera == 0:
    files_from_cl = files_train[itera]
  else:
    files_from_cl = files_train[itera][:]
    for i in range(itera*nb_cl):
      nb_protos_cl  = int(np.ceil(nb_proto*nb_groups*1./itera)) # Reducing number of exemplars of the previous classes
      tmp_var = files_protoset[i]
      files_from_cl += tmp_var[0:min(len(tmp_var),nb_protos_cl)]
  
  ## Import the data reader ##
  image_train, label_train   = utils_data.read_data(train_path, labels_dic, mixing, files_from_cl=files_from_cl)  
  image_batch, label_batch_0 = tf.train.batch([image_train, label_train], batch_size=batch_size, num_threads=8)
  label_batch = tf.one_hot(label_batch_0,nb_groups*nb_cl)
  
  ## Define the objective for the neural network ##
  if itera == 0:
    # No distillation
    variables_graph,variables_graph2,scores,scores_stored = utils_icarl.prepare_networks(gpu,image_batch, nb_cl, nb_groups)
    
    # Define the objective for the neural network: 1 vs all cross_entropy
    with tf.device('/gpu:0'):
        scores        = tf.concat(scores,0)
        l2_reg        = wght_decay * tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope='ResNet18'))
        loss_class    = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=label_batch, logits=scores)) 
        loss          = loss_class + l2_reg
        learning_rate = tf.placeholder(tf.float32, shape=[])
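
The snippet stops at the learning-rate placeholder; in TF1 graph mode such a graph is then typically driven as in the sketch below. This continuation is not from the repository: the optimizer choice and the names nb_iterations and current_lr are hypothetical, and the queue runners are needed because tf.train.batch reads from an input queue.

# Hypothetical continuation (not from this repository): feed the placeholder
# from the training loop so the learning rate can be decayed over time.
opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
train_step = opt.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)  # tf.train.batch needs queue runners
    for step in range(nb_iterations):
        sess.run(train_step, feed_dict={learning_rate: current_lr})
    coord.request_stop()
    coord.join(threads)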