Example #1
class_means = np.zeros((512, nb_groups * nb_cl, 2, nb_groups))
files_protoset = []
for _ in range(nb_groups * nb_cl):
    files_protoset.append([])

### Preparing the files for the training/validation ###

np.random.seed(1993)
order = np.arange(nb_groups * nb_cl)

# Preparing the files per group of classes
print("Loading data ...")
output_file.write("Loading data ...\n")
output_file.flush()
files_train, files_valid = utils_data.prepare_files(train_images_path,
                                                    val_images_path, nb_groups,
                                                    nb_cl)

with open(save_path + str(nb_cl) + 'settings_resnet.pickle', 'wb') as fp:
    cPickle.dump(order, fp)
    cPickle.dump(files_valid, fp)
    cPickle.dump(files_train, fp)

### Start of the main algorithm ###

for itera in range(nb_groups):

    # Files to load: training samples + protoset
    print('Batch of classes number {0} arrives ...'.format(itera + 1))
    output_file.write(
        'Batch of classes number {0} arrives ...\n'.format(itera + 1))
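
Since order, files_valid and files_train are dumped one after another into a single settings file, any later stage has to load them back in the same order. A minimal sketch of that reload step, assuming the same save_path, nb_cl and cPickle import used in the snippet:

with open(save_path + str(nb_cl) + 'settings_resnet.pickle', 'rb') as fp:
    order = cPickle.load(fp)        # class ordering, first object dumped
    files_valid = cPickle.load(fp)  # validation file lists per group
    files_train = cPickle.load(fp)  # training file lists per group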
Example #2
np.random.seed(1993)
order = np.arange(1000)
mixing = np.arange(1000)
np.random.shuffle(mixing)

# Loading the labels
labels_dic, label_names, validation_ground_truth = utils_data.parse_devkit_meta(
    devkit_path)
# Or you can just do like this
# define_class = ['apple', 'banana', 'cat', 'dog', 'elephant', 'frog']
# labels_dic = {k: v for v, k in enumerate(define_class)}

# Preparing the files per group of classes
print("Creating a validation set ...")
files_train, files_valid = utils_data.prepare_files(train_path, mixing, order,
                                                    labels_dic, nb_groups,
                                                    nb_cl, nb_val)

# Pickle order and files lists and mixing
with open(str(nb_cl) + 'mixing.pickle', 'wb') as fp:
    cPickle.dump(mixing, fp)

with open(str(nb_cl) + 'settings_resnet.pickle', 'wb') as fp:
    cPickle.dump(order, fp)
    cPickle.dump(files_valid, fp)
    cPickle.dump(files_train, fp)

### Start of the main algorithm ###

for itera in range(nb_groups):
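
The commented-out alternative replaces the devkit metadata with a hand-written class list; for reference, the dict comprehension simply maps each class name to its position in that list:

define_class = ['apple', 'banana', 'cat', 'dog', 'elephant', 'frog']
labels_dic = {k: v for v, k in enumerate(define_class)}
print(labels_dic)
# {'apple': 0, 'banana': 1, 'cat': 2, 'dog': 3, 'elephant': 4, 'frog': 5}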
Example #3
# Random mixing
print("Mixing the classes and putting them in batches of classes...")
np.random.seed(1993)
order  = np.arange(nb_groups * nb_cl)
mixing = np.arange(nb_groups * nb_cl)
np.random.shuffle(mixing)

# Loading the labels
labels_dic, label_names, validation_ground_truth = utils_data.parse_devkit_meta(devkit_path)
# Or you can just do like this
# define_class = ['apple', 'banana', 'cat', 'dog', 'elephant', 'frog']
# labels_dic = {k: v for v, k in enumerate(define_class)}

# Preparing the files per group of classes
print("Creating a validation set ...")
files_train, files_valid = utils_data.prepare_files(train_path, mixing, order, labels_dic, nb_groups, nb_cl, nb_val)

# Pickle order and files lists and mixing
with open(str(nb_cl) + 'mixing.pickle', 'wb') as fp:
    cPickle.dump(mixing, fp)

with open(str(nb_cl) + 'settings_resnet.pickle', 'wb') as fp:
    cPickle.dump(order, fp)
    cPickle.dump(files_valid, fp)
    cPickle.dump(files_train, fp)


### Start of the main algorithm ###

for itera in range(nb_groups):
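
As in example #2, the shuffled mixing permutation is what decides which classes land in each incremental batch; the grouping itself presumably happens inside utils_data.prepare_files. Purely as a self-contained illustration of that idea (the values of nb_cl and nb_groups below are made up, not taken from the snippet):

import numpy as np

np.random.seed(1993)
nb_cl = 100      # classes per incremental batch (illustrative value)
nb_groups = 10   # number of incremental batches (illustrative value)

mixing = np.arange(nb_groups * nb_cl)
np.random.shuffle(mixing)

# View the permutation as nb_groups batches of nb_cl class indices
for itera in range(nb_groups):
    batch_classes = mixing[itera * nb_cl:(itera + 1) * nb_cl]
    print('Batch {0}: first class indices {1}'.format(itera, batch_classes[:5]))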