Code Example #1
    def __init__(self, args):
        super(cnn_TRSF, self).__init__()

        self.cnn_1 = cnn()
        self.cnn_2 = cnn()
        self.tr = TR(args, (24, 8, 8), 64)
        self.sf = SF(args, (24, 8, 8), 64)
        self.final = nn.Sequential(nn.Linear(128, 64), nn.ReLU(),
                                   nn.Linear(64, 32), nn.ReLU(),
                                   nn.Linear(32, 2))
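
Only the constructor appears above. A minimal sketch of a plausible forward pass: the two-image input, the TR/SF call signatures, and the intermediate shapes are assumptions; the 64 + 64 = 128 concatenation is implied by the input size of self.final.

    def forward(self, img1, img2):
        f1 = self.cnn_1(img1)    # assumed feature volume (batch, 24, 8, 8)
        f2 = self.cnn_2(img2)    # assumed feature volume (batch, 24, 8, 8)
        t = self.tr(f1, f2)      # assumed to return (batch, 64)
        s = self.sf(f1, f2)      # assumed to return (batch, 64)
        # concatenate the two 64-dim heads to match nn.Linear(128, 64)
        return self.final(torch.cat((t, s), dim=1))  # (batch, 2) logits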
Code Example #2
    def __init__(self, args):
        super(cnn_test2, self).__init__()

        self.cnn_1 = cnn()
        self.cnn_2 = cnn()
        self.tr_1 = TR(args, (24, 8, 8), 64)
        self.sf_1 = SF(args, (24, 8, 8), 64)
        self.tr_2 = TR(args, (24, 8, 8), 64)
        self.sf_2 = SF(args, (24, 8, 8), 64)
        self.af_1 = nn.Sequential(nn.Linear(128, 64), nn.ReLU(),
                                  nn.Linear(64, 64), nn.ReLU())
        self.af_2 = nn.Sequential(nn.Linear(128, 64), nn.ReLU(),
                                  nn.Linear(64, 64), nn.ReLU())
        self.final = nn.Sequential(nn.Linear(128, 64), nn.ReLU(),
                                   nn.Linear(64, 32), nn.ReLU(),
                                   nn.Dropout(p=0.1), nn.Linear(32, 2))
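
cnn_test2 stacks two TR/SF pairs. A hypothetical forward pass matching the layer dimensions (the call signatures and shapes are assumptions inferred from the constructor):

    def forward(self, img1, img2):
        f1, f2 = self.cnn_1(img1), self.cnn_2(img2)   # assumed (batch, 24, 8, 8)
        h1 = self.af_1(torch.cat((self.tr_1(f1, f2), self.sf_1(f1, f2)), dim=1))
        h2 = self.af_2(torch.cat((self.tr_2(f1, f2), self.sf_2(f1, f2)), dim=1))
        # each af_i fuses 64 + 64 -> 64; final fuses 64 + 64 -> 2
        return self.final(torch.cat((h1, h2), dim=1))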
Code Example #3
    def __init__(self, args, fourier):
        super(dcnn, self).__init__()
        self.batch_size = args[0]
        self.cuda = args[1]
        cuda = self.cuda

        self.conv = transfer_img(fourier)
        self.cnn = cnn()
        self.fc = FC(24, 8, 8, 2, cuda)
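
Again only __init__ is shown. A minimal sketch of a forward pass, under the assumption that the modules are applied in declaration order and that FC(24, 8, 8, 2, cuda) maps a (24, 8, 8) feature volume to 2 class scores:

    def forward(self, x):
        x = self.conv(x)   # Fourier-based image transform (assumed behaviour)
        x = self.cnn(x)    # assumed -> (batch, 24, 8, 8)
        return self.fc(x)  # assumed -> (batch, 2)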
Code Example #4
def main():
    # data_path='x_train.npy'
    # csv_path= '../Aidata/aoi_race/train.csv'
    #Images , Labels= get_dataset(data_path,csv_path)
    #x_train, y_train, x_test, y_test=get_preprocessed_dataset(Images,Labels)
    train_data_dir = '../Aidata/aoi_race/train_images'
    train_gen, valid_gen = ImageGenerator(train_data_dir)
    model = cnn()
    model = model.InceptionV1(256, 256, 3, 6)
    train(model, train_gen, valid_gen)
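
The snippet relies on a project-level ImageGenerator helper. A plausible implementation with Keras' ImageDataGenerator, as a sketch: the 80/20 validation split, the 256x256 target size (matching InceptionV1(256, 256, 3, 6) above), and the batch size are all assumptions; only the function name and the directory argument appear in the original.

from keras.preprocessing.image import ImageDataGenerator  # assumed Keras backend

def ImageGenerator(data_dir, img_size=(256, 256), batch_size=32):
    # one generator with a held-out validation subset
    datagen = ImageDataGenerator(rescale=1. / 255, validation_split=0.2)
    train_gen = datagen.flow_from_directory(
        data_dir, target_size=img_size, batch_size=batch_size,
        class_mode='categorical', subset='training')
    valid_gen = datagen.flow_from_directory(
        data_dir, target_size=img_size, batch_size=batch_size,
        class_mode='categorical', subset='validation')
    return train_gen, valid_gen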
Code Example #5
    def __init__(self, args):
        super(cnn_RN, self).__init__()

        self.conv = cnn()
        self.batch_size = args[0]
        self.cuda = args[1]
        b = self.batch_size
        cuda = self.cuda

        self.rn = RN(b, 24, 8, 8, 2, cuda=cuda, method=0)
Code Example #6
    def __init__(self, args):
        super(cnn_SN, self).__init__()

        self.batch_size = args[0]
        self.cuda = args[1]
        b = self.batch_size
        cuda = self.cuda
        
        # (b, 24, 8, 8)
        self.conv = cnn()
        self.sn = SN(b, 24, 8, 8, 2, cuda=cuda, method=1)
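
Both cnn_RN (Example #5) and cnn_SN (Example #6) pair the shared cnn feature extractor with a relational head. A hypothetical forward pass covering either class; only the constructors are shown above, so the single-argument head call is an assumption:

    def forward(self, img):
        x = self.conv(img)   # -> (b, 24, 8, 8), per the comment in the constructor
        return self.rn(x)    # self.sn(x) in cnn_SN; the head emits 2 class scores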
Code Example #7
File: DRLEnv.py Project: cfh19980612/MNIST-DRL
    def __init__(self, Client, k):
        
        self.client = Client
        self.p = 0.5
        self.Model = []
          
        # small world
        self.G = nx.watts_strogatz_graph(n=self.client, k=k, p=self.p)

        # To DGL graph
        # self.g = dgl.from_networkx(self.G)
        
        # PCA
        self.pca = PCA(n_components=self.client)
        # latency simulation: random pairwise latency in [1, 20]
        self.latency = [[random.randint(1, 20) for j in range(self.client)]
                        for i in range(self.client)]

        self.task = cnn(Client=self.client, Dataset='MNIST', Net='MNISTNet')    # number of clients, dataset name, network name
        self.Model, self.global_model = self.task.Set_Environment(Client)
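
A hypothetical usage sketch; the class name Env and the argument values are assumptions, since the snippet shows only the constructor body:

env = Env(Client=10, k=4)           # 10 clients, each wired to 4 ring neighbours
print(env.G.number_of_nodes())      # -> 10 nodes in the small-world topology
print(env.latency[2][7])            # simulated latency in [1, 20] between clients 2 and 7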
Code Example #8
    tm.restoreModel(False)

    with tf.device('/cpu:0'):
        config = tf.compat.v1.ConfigProto(allow_soft_placement=False, log_device_placement=False)
        training_dataset, training_iterations, validation_dataset, validation_iterations, testing_dataset, testing_iterations = \
            loadDatasets(BATCH_SIZE, SHUFFLE_SIZE, BUFFER_SIZE, ROTATE_BATCHES, ROTATIONS, ZOOM_BATCHES, ZOOMS, GAUSSIAN_BATCHES)
        iterator = tf.compat.v1.data.Iterator.from_structure(training_dataset.output_types, training_dataset.output_shapes)
        training_init_op = iterator.make_initializer(training_dataset)
        validation_init_op = iterator.make_initializer(validation_dataset)
        testing_init_op = iterator.make_initializer(testing_dataset)
        global_step = tf.compat.v1.train.create_global_step()
        opt = tf.compat.v1.train.AdamOptimizer(learning_rate=LEARNING_RATE)
        batch, label, is_training = iterator.get_next()
        keep_prob = tf.cond(is_training, lambda: KEEP_PROB, lambda: 1.0)
    with tf.device('/gpu:1'):
        model = cnn(NUM_CLASSES, INPUT_CHANNELS, keep_prob).computationalGraph(batch)     
        model_reshaped = tf.reshape(model, [-1, NUM_CLASSES])
        model_argmax = tf.argmax(model_reshaped, 1)
        label_reshaped = tf.reshape(label, [-1, NUM_CLASSES])
        label_argmax = tf.argmax(label_reshaped, 1)
        reg = tf.reduce_sum(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES))
        prediction = tf.equal(model_argmax, label_argmax) 
        accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
        cross_entropy = tf.reduce_mean(tf.compat.v1.nn.softmax_cross_entropy_with_logits_v2(labels=label_reshaped, logits=model_reshaped))
        loss = cross_entropy + LAMBDA_REG * reg
        train_step = opt.minimize(loss, global_step=global_step)

    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session(config=config) as sess:
        tm.initSess(sess, saver=saver)       
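
The snippet ends right after the session is initialised. A minimal sketch of a training loop that could follow, assuming NUM_EPOCHS is defined alongside the other hyper-parameter constants and that training_iterations / validation_iterations count batches per pass (both assumptions):

        for epoch in range(NUM_EPOCHS):
            sess.run(training_init_op)               # rewind the training set
            for _ in range(training_iterations):
                _, step = sess.run([train_step, global_step])
            sess.run(validation_init_op)             # evaluate on held-out data
            total = 0.0
            for _ in range(validation_iterations):
                total += sess.run(accuracy)
            print('epoch %d: validation accuracy %.4f'
                  % (epoch, total / validation_iterations))
            saver.save(sess, './model.ckpt', global_step=step)  # path is a placeholder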
Code Example #9
def cluster_training(clusters, ID, nb_iter, all_images, all_label_numbers,
                     all_label_names, all_clusters_with_ID, model_path):

    list_nodes_graph = []

    if clusters:

        print("Start Training\n")
        ID_index = -1
        for cluster in clusters:

            # print("ID: ",ID)

            ID_index += 1

            print("cluster number", cluster)
            # NUM_CLASSES

            NUM_CLASSES = len(cluster)
            print('number of species: ', NUM_CLASSES)

            cluster_index = [
                idx for idx, element in enumerate(all_label_numbers)
                if element in cluster
            ]  # all indexes in all_label_numbers whose label is in this cluster

            cluster_images = []
            cluster_labels = []
            cluster_names = []

            for index in cluster_index:  # collect the cluster images and labels

                cluster_images.append(all_images[index])
                cluster_labels.append(all_label_numbers[index])
                cluster_names.append(all_label_names[index])

            print('cluster composed by', set(cluster_names))
            print('image number:', len(cluster_labels))

            label_dict = dict()

            # list_nodes_graph.append('save_cluster_' + str(ID[ID_index]))

            if len(cluster_labels) >= 4 * batch_size:

                #### transform labels into an adapted one-hot encoding ####
                # map the original label values onto contiguous indices 0..len(unique)-1
                unique = list(set(cluster_labels))
                rectified_labels = [unique.index(element) for element in cluster_labels]
                for value in unique:
                    label_dict[unique.index(value)] = [
                        value, cluster_names[cluster_labels.index(value)]
                    ]

                if label_dict not in all_clusters_with_ID[3::4]:

                    list_nodes_graph.append('save_cluster_' +
                                            str(ID[ID_index]))

                    X_train, Y_train, X_val, Y_val, X_test, Y_test = divide_images_and_labels(
                        np.array(cluster_images, dtype='float32'),
                        np.eye(NUM_CLASSES, dtype='uint8')[rectified_labels])  # one-hot labels

                    all_clusters_with_ID.extend(
                        (ID[ID_index], X_test, Y_test, label_dict)
                    )  #save the images and label for future call in confusion_matrix

                    # PATH_SAVE_MODEL
                    PATH_SAVE_ITER_DIR = COMMON_PATH + '/clusters_saves/iteration_' + str(
                        nb_iter)
                    if not os.path.isdir(PATH_SAVE_ITER_DIR):
                        os.mkdir(PATH_SAVE_ITER_DIR)
                    PATH_SAVE_MODEL_DIR = COMMON_PATH + '/clusters_saves/iteration_' + str(
                        nb_iter) + '/save_cluster_' + str(ID[ID_index])
                    os.mkdir(PATH_SAVE_MODEL_DIR)
                    PATH_SAVE_MODEL = PATH_SAVE_MODEL_DIR + '/save'

                    df = pd.DataFrame(
                        list(set(cluster_names))
                    )  #save the species name regarding each cluster to help the final prediction afterward
                    df.to_csv(
                        PATH_SAVE_MODEL_DIR + '/labels.csv', index=False
                    )  #to load afterward : my_data = np.genfromtxt(PATH_SAVE_MODEL_DIR+'labels.csv', dtype=None,encoding=None)[1:]

                    cnn(NUM_CLASSES, X_train, Y_train, X_test, Y_test, X_val,
                        Y_val, PATH_SAVE_MODEL, batch_size, img_size,
                        model_path)
                    print("DL model " + str(ID[ID_index]) + " saved")
                    print("cluster trained (DL)-\n")

                else:
                    print(
                        r" /!\ cluster already present at the previous iteration, no training /!\ "
                    )

            else:
                print("using SVM:")

                X_train, Y_train, X_val, Y_val, X_test, Y_test = divide_images_and_labels(
                    np.array(cluster_images, dtype='float32'),
                    cluster_labels)

                for element in cluster_labels:
                    label_dict[element] = [
                        element, cluster_names[cluster_labels.index(element)]
                    ]

                father_cluster_ID = int(
                    model_path.split('/')[-2].split('_')[-1])

                all_clusters_with_ID.extend(
                    (ID[ID_index], father_cluster_ID, 0, label_dict))

                nsamples, x, y, z = X_train.shape
                X_train = X_train.reshape((nsamples, x * y * z))  # flatten each image for the SVM

                clf = svm.SVC()
                clf.fit(X_train, Y_train)

                PATH_SAVE_ITER_DIR = COMMON_PATH + '/clusters_saves/iteration_' + str(
                    nb_iter)
                if not os.path.isdir(PATH_SAVE_ITER_DIR):
                    os.mkdir(PATH_SAVE_ITER_DIR)
                PATH_SAVE_MODEL_DIR = COMMON_PATH + '/clusters_saves/iteration_' + str(
                    nb_iter) + '/save_cluster_' + str(ID[ID_index])
                os.mkdir(PATH_SAVE_MODEL_DIR)
                PATH_SAVE_MODEL = PATH_SAVE_MODEL_DIR + '/save'

                df = pd.DataFrame(
                    list(set(cluster_names))
                )  # save the species name regarding each cluster to help the final prediction afterward
                df.to_csv(
                    PATH_SAVE_MODEL_DIR + '/labels.csv', index=False
                )  # to load afterward : my_data = np.genfromtxt(PATH_SAVE_MODEL_DIR+'labels.csv', dtype=None,encoding=None)[1:]

                joblib.dump(clf, PATH_SAVE_MODEL)
                print("SVM model " + str(ID[ID_index]) + " saved")

                print("cluster trained (SVM)-\n")

    else:
        print("no more cluster - no training needed")

    return (all_clusters_with_ID, list_nodes_graph)
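
The one-hot remapping inside cluster_training compresses an arbitrary set of label values into the contiguous index range that np.eye indexing requires. A standalone illustration of the same idea (the label values are invented for the demo):

import numpy as np

cluster_labels = [7, 42, 7, 13, 42]        # arbitrary, non-contiguous labels
unique = list(set(cluster_labels))         # e.g. [42, 13, 7] (set order varies)
rectified = [unique.index(l) for l in cluster_labels]    # contiguous 0..2
one_hot = np.eye(len(unique), dtype='uint8')[rectified]  # one row per sample
print(one_hot.shape)                       # (5, 3)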
Code Example #10
File: NN_run_training.py Project: xlcodeme/VIN
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--input")
    parser.add_argument("--output", default="None")
    parser.add_argument("--epochs", type=int, default=10)
    parser.add_argument("--profile", action="store_true")
    parser.add_argument("--dropout", action="store_true")
    parser.add_argument("--stepsize", type=float, default=.0002)
    parser.add_argument("--model",
                        choices=[
                            "dense1", "dense2", "dense3", "conv",
                            "valIterMultiBatch", "valIterBatch", "valIterMars",
                            "valIterMarsSingle", "valIterBatchUntied", "fcn",
                            "cnn"
                        ],
                        default="dense")
    parser.add_argument("--unittest", action="store_true")
    parser.add_argument("--grad_check", action="store_true")
    parser.add_argument("--devtype", choices=["cpu", "gpu"], default="cpu")
    parser.add_argument("--warmstart", default="None")
    parser.add_argument("--reg", type=float, default=.0)
    parser.add_argument("--imsize", type=int, default=28)
    parser.add_argument("--k", type=int, default=10)
    parser.add_argument("--batchsize", type=int, default=128)
    parser.add_argument("--statebatchsize", type=int, default=1)
    parser.add_argument("--stepdecreaserate", type=float, default=1.0)
    parser.add_argument("--stepdecreasetime", type=int, default=10000)
    parser.add_argument("--data_fraction", type=float, default=1.0)
    args = parser.parse_args()

    if args.model == "fcn":
        # FCN network
        my_nn = fcn(model=args.model,
                    im_size=[args.imsize, args.imsize],
                    dropout=args.dropout,
                    devtype=args.devtype,
                    grad_check=args.grad_check,
                    reg=args.reg,
                    batchsize=args.batchsize,
                    statebatchsize=args.statebatchsize)
    elif args.model == "cnn":
        # CNN network
        my_nn = cnn(model=args.model,
                    im_size=[args.imsize, args.imsize],
                    dropout=args.dropout,
                    devtype=args.devtype,
                    grad_check=args.grad_check,
                    reg=args.reg,
                    batchsize=args.batchsize)
    elif args.model == "valIterBatch":
        # VI network
        my_nn = vin(model=args.model,
                    im_size=[args.imsize, args.imsize],
                    dropout=args.dropout,
                    devtype=args.devtype,
                    grad_check=args.grad_check,
                    reg=args.reg,
                    k=args.k,
                    batchsize=args.batchsize,
                    statebatchsize=args.statebatchsize)
    elif args.model == "valIterBatchUntied":
        # VI network with untied weights
        my_nn = vin_untied(model=args.model,
                           im_size=[args.imsize, args.imsize],
                           dropout=args.dropout,
                           devtype=args.devtype,
                           grad_check=args.grad_check,
                           reg=args.reg,
                           k=args.k,
                           batchsize=args.batchsize,
                           statebatchsize=args.statebatchsize)
    else:
        # FC network
        my_nn = NNobj(model=args.model,
                      im_size=[args.imsize, args.imsize],
                      dropout=args.dropout,
                      devtype=args.devtype,
                      grad_check=args.grad_check,
                      reg=args.reg)
    if args.warmstart != "None":
        print('warmstarting...')
        my_nn.load_weights(args.warmstart)
    my_nn.run_training(input=str(args.input),
                       stepsize=args.stepsize,
                       epochs=args.epochs,
                       grad_check=args.grad_check,
                       batch_size=args.batchsize,
                       data_fraction=args.data_fraction)
    my_nn.save_weights(outfile=str(args.output))
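
A hypothetical invocation of the script above (the input file name is a placeholder; the flags mirror the parser):

python NN_run_training.py --input data.npz --model cnn --epochs 10 --devtype cpu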
Code Example #11
File: test.py Project: baptistepouthier/plancton
if run_from_zero:
    all_images, all_label_names, all_label_numbers = prepare_images()

    # save
    np.save(COMMON_PATH + "/all_images", all_images)
    np.save(COMMON_PATH + "/all_label_names", all_label_names)
    np.save(COMMON_PATH + "/all_label_numbers", all_label_numbers)

    X_train, Y_train, X_val, Y_val, X_test, Y_test = divide_images_and_labels(
        np.array(all_images, dtype='float32'),
        np.eye(121, dtype='uint8')[all_label_numbers])

    # save
    np.save(COMMON_PATH + "/X_test", X_test)
    np.save(COMMON_PATH + "/Y_test", Y_test)

    cnn(121, X_train, Y_train, X_test, Y_test, X_val, Y_val,
        COMMON_PATH + '/clusters_saves/iteration_0/save_cluster_0/save',
        batch_size, img_size, None)

else:
    all_images = np.load(COMMON_PATH + '/all_images.npy')
    all_label_names = np.load(COMMON_PATH + '/all_label_names.npy')
    all_label_numbers = np.load(COMMON_PATH + '/all_label_numbers.npy')

    X_test = np.load(COMMON_PATH + '/X_test.npy')
    Y_test = np.load(COMMON_PATH + '/Y_test.npy')


empty_dict = dict()
all_clusters_with_ID = [0, X_test, Y_test, empty_dict]
clusters = True

graph_archi = nx.Graph()