Code Example #1
def main():
    ttv_info = ttv_yaml_to_dict(THIS_DIR + 'ttv_berlin.yaml')
    print("GETTING SPECTORGRAM DATA...")
    spectrogram_data = ttv_to_spectrograms(
        ttv_info,
        normalise_waveform=normalise,
        normalise_spectrogram=slice_spectrogram,
        cache=THIS_DIR + 'berlin')

    emotion = 'happy'
    test, train, val = ttv_data = learning.split_ttv(spectrogram_data,
                                                     category=emotion)

    test['x'] = np.reshape(test['x'], (test['x'].shape[0], ) + (1, ) +
                           test['x'].shape[1:])
    train['x'] = np.reshape(train['x'], (train['x'].shape[0], ) + (1, ) +
                            train['x'].shape[1:])
    val['x'] = np.reshape(val['x'],
                          (val['x'].shape[0], ) + (1, ) + val['x'].shape[1:])

    learning.train(
        make_model,
        ttv_data,
        'model_happy',
        path_to_results=THIS_DIR,
        generate_callbacks=generate_callbacks,
        number_of_epochs=200,
        dry_run=False,
        end_training=lambda x: x >= 1,
        # classification='happy'
        # to_terminal=True
        # class_weight={0:10, 1:1}
    )
Code Example #2
def main():
    ttv_info = ttv_yaml_to_dict(THIS_DIR + 'ttv_rb.yaml')
    print("GETTING SPECTORGRAM DATA...")
    spectrogram_data = ttv_to_spectrograms(
        ttv_info,
        normalise_waveform=normalise,
        normalise_spectrogram=slice_spectrogram,
        cache=THIS_DIR
    )

    for emotion in EMOTIONS:
        print('TRAINING ON:', emotion)
        test, train, val = ttv_data = learning.split_ttv(spectrogram_data, category=emotion)

        test['x'] = np.reshape(test['x'], (test['x'].shape[0],) + (1,) + test['x'].shape[1:])
        train['x'] = np.reshape(train['x'], (train['x'].shape[0],) + (1,) + train['x'].shape[1:])
        val['x'] = np.reshape(val['x'], (val['x'].shape[0],) + (1,) + val['x'].shape[1:])

        learning.train(
            make_model,
            ttv_data,
            'model_' + emotion,
            path_to_results=THIS_DIR,
            generate_callbacks=generate_callbacks,
            number_of_epochs=200,
            dry_run=False,
            end_training=lambda x: x >= 1,
            # classification='happy'
            # to_terminal=True
            class_weight={0:1, 1:100}
        )
Code Example #3
def main():
    ttv_info = ttv_yaml_to_dict(THIS_DIR + '../ttvs/ttv_rb.yaml')
    print("GETTING SPECTORGRAM DATA...")
    spectrogram_data = ttv_to_spectrograms(
        ttv_info,
        normalise_waveform=normalise,
        normalise_spectrogram=slice_spectrogram,
        cache=THIS_DIR
    )

    test, train, val = ttv_data = learning.split_ttv(spectrogram_data)

    # test['x']  = np.reshape(test['x'],  (test['x'].shape[0] ,) + (1,) + test['x'].shape[1:] )
    # train['x'] = np.reshape(train['x'], (train['x'].shape[0],) + (1,) + train['x'].shape[1:]  )
    # val['x']   = np.reshape(val['x'],   (val['x'].shape[0]  ,) + (1,) + val['x'].shape[1:]  )


    learning.train(
        make_model,
        ttv_data,
        THIS_DIR + 'model',
        path_to_results=THIS_DIR,
        generate_callbacks=generate_callbacks,
        number_of_epochs=200
    )
Code Example #4
File: server.py  Project: akshos/MERE
def main():
    option = 'n'
    print '---------------------------------------------------'
    print 'MATHEMATICAL EXPRESSION RECOGNITION AND EVALUATION'
    print '---------------------------------------------------'
    while option != 'q':
        print 'Main Menu (Classifier: ' + svm_nn + ')'
        print '(s)Standalone Mode \n(S)Server Mode \n(t)Train \n(g)Generate Training Set \n(e)Test \n(c)Change Classifier \n(q)Quit'
        option = str(raw_input('Option: '))
        if option == 'q':
            break
        elif option == 't':
            learning.train()
        elif option == 'g':
            learning.generateTrainingSet()
        elif option == 'e':
            imageFileName = str(raw_input('Enter image file name : '))
            if imageFileName == 'q':
                continue
            test(imageFileName)
        elif option == 's':
            print 'Started in Standalone mode'
            imageFileName = str(raw_input('Enter image file name : '))
            if imageFileName == 'q':
                continue
            standalone(imageFileName)
        elif option == 'S':
            server()
        elif option == 'c':
            changeClassifier()
        else:
            print 'Please enter a valid option'
Code Example #5
 def train(self, event):
     # Save the new training examples and start training
     import learning as Ln
     import scipy.io as spio
     Total = {'X': self.X, 'y': self.y}
     spio.savemat('sample.mat', Total)
     showinfo(
         "Training",
         "Press 'OK' to start! \nThere'll be another window appear when finished!\nPLEASE WAIT\n"
     )
     Ln.train('sample.mat', 'Theta.mat')
     showinfo("Training", "Training Finished!")
Code Example #6
def train_all_image_in_folder(foldername, status):
    #foldername: string, status: one hot [0,1] (yes) or [1,0] (no)
    trlabel = []
    trfeat = []
    #input all image file in folder
    for file in os.listdir(foldername):
        if file.endswith('.png') or file.endswith('.jpg') or file.endswith(
                '.jpeg'):
            train_array = convert_to_ndarray(foldername + '/' + file, [56, 56])
            trfeat.append(train_array)
            trlabel.append(status)
    train(trfeat, trlabel)
Code Example #7
def main():
    ttv_info = ttv_yaml_to_dict(THIS_DIR + 'ttv1.yaml')
    print("GETTING WAVEFORM DATA...")
    ttv_data = ttv_to_waveforms(ttv_info, normalise=normalise, cache=THIS_DIR + 'ttv1.cache.hdf5')

    learning.train(
        make_mlp_model,
        ttv_data,
        'experiment1',
        path_to_results='experiments/RAVDESS_MLP',
        generate_callbacks=generate_callbacks,
        number_of_epochs=2,
        # dry_run=True,
        to_terminal=True
    )
Code Example #8
def main(argv=None):
    train_sources, train_targets, valid_sources, valid_targets, test_sources, test_targets, \
    source_vocab, source_idx2char, target_vocab, target_idx2char = prepare_g2p_data(FLAGS)

    experiment = Experiment(source_vocab, source_idx2char, target_vocab,
                            target_idx2char, FLAGS, VERSION)

    if not FLAGS.eval_only:
        train(train_sources, train_targets, valid_sources, valid_targets,
              experiment, "[TR]")
        ops.reset_default_graph()

    if FLAGS.model_parameter_saving or FLAGS.eval_only:
        test(valid_sources, valid_targets, test_sources, test_targets,
             experiment, "[TE]")
Code Example #9
def main():
    ttv_info = ttv_yaml_to_dict(THIS_DIR + 'ttv1.yaml')
    print("GETTING WAVEFORM DATA...")
    ttv_data = ttv_to_waveforms(ttv_info,
                                normalise=normalise,
                                cache=THIS_DIR + 'ttv1.cache.hdf5')

    learning.train(
        make_mlp_model,
        ttv_data,
        'experiment1',
        path_to_results='experiments/RAVDESS_MLP',
        generate_callbacks=generate_callbacks,
        number_of_epochs=2,
        # dry_run=True,
        to_terminal=True)
Code Example #10
 def reset(self):
     # Replace 'sample.mat' by 'reset.mat' and train again
     showinfo(
         "Reset",
         "Press 'OK' to start! \nThere'll be another window appear when finished!\nPLEASE WAIT\n"
     )
     import os
     from shutil import copy
     try:
         os.remove('sample.mat')
         copy('reset.mat', 'sample.mat')
     except:
         print "File Error"
         showinfo(
             "Reset",
             "Error while reset!\n(file missing or permission denied)")
         return None
     import learning as LN
     LN.train('sample.mat', 'Theta.mat')
     showinfo("Reset", "Reset successfully!")
Code Example #11
def main():
    ttv_file = 'ttv_bt.yaml'
    ttv_info = yaml_to_dict(THIS_DIR + ttv_file)
    print("GETTING SPECTORGRAM DATA...")
    spectrogram_data = ttv_to_spectrograms(
        ttv_info,
        normalise_waveform=normalise_waveform,
        normalise_spectrogram=slice_spectrogram,
        cache=THIS_DIR,
    )
    test, train, validation = ttv_data = learning.split_ttv(spectrogram_data)

    learning.train(make_mlp_model,
                   ttv_data,
                   'mlp_spectrogram_' + ttv_file.split('.')[0],
                   path_to_results=THIS_DIR,
                   generate_callbacks=generate_callbacks,
                   number_of_epochs=200,
                   dry_run=False
                   # to_terminal=True
                   )
Code Example #12
def main():
    ttv_info = ttv_yaml_to_dict(THIS_DIR + '../ttvs/ttv_rb.yaml')
    print("GETTING SPECTORGRAM DATA...")
    spectrogram_data = ttv_to_spectrograms(
        ttv_info,
        normalise_waveform=normalise,
        normalise_spectrogram=slice_spectrogram,
        cache=THIS_DIR)

    test, train, val = ttv_data = learning.split_ttv(spectrogram_data)

    # test['x']  = np.reshape(test['x'],  (test['x'].shape[0] ,) + (1,) + test['x'].shape[1:] )
    # train['x'] = np.reshape(train['x'], (train['x'].shape[0],) + (1,) + train['x'].shape[1:]  )
    # val['x']   = np.reshape(val['x'],   (val['x'].shape[0]  ,) + (1,) + val['x'].shape[1:]  )

    learning.train(make_model,
                   ttv_data,
                   THIS_DIR + 'model',
                   path_to_results=THIS_DIR,
                   generate_callbacks=generate_callbacks,
                   number_of_epochs=200)
Code Example #13
def main():
    ttv_info = ttv_yaml_to_dict(THIS_DIR + 'ttv1.yaml')
    print("GETTING WAVEFORM DATA...")
    ttv_data = ttv_to_waveforms(ttv_info, normalise=normalise, cache=THIS_DIR + 'ttv1.cache.hdf5')

    test, train, val = ttv_data
    test['x'] = np.reshape(test['x'], test['x'].shape + (1,))
    train['x'] = np.reshape(train['x'], train['x'].shape + (1,))
    val['x'] = np.reshape(val['x'], val['x'].shape + (1,))


    learning.train(
        make_mlp_model,
        ttv_data,
        '1D_CNN_RAVDESS',
        path_to_results='experiments/RAVDESS_MLP',
        generate_callbacks=generate_callbacks,
        number_of_epochs=200,
        dry_run=True,
        to_terminal=True
    )
Code Example #14
def main():
    ttv_file = 'ttv_bt.yaml'
    ttv_info = yaml_to_dict(THIS_DIR + ttv_file)
    print("GETTING SPECTORGRAM DATA...")
    spectrogram_data = ttv_to_spectrograms(
        ttv_info,
        normalise_waveform=normalise_waveform,
        normalise_spectrogram=slice_spectrogram,
        cache=THIS_DIR,
    )
    test, train, validation = ttv_data = learning.split_ttv(spectrogram_data)

    learning.train(
        make_mlp_model,
        ttv_data,
        'mlp_spectrogram_' + ttv_file.split('.')[0],
        path_to_results=THIS_DIR,
        generate_callbacks=generate_callbacks,
        number_of_epochs=200,
        dry_run=False
        # to_terminal=True
    )
Code Example #15
def main(_):
    assert six.PY3
    assert 1 == FLAGS.num_clones

    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        #######################
        # Config model_deploy #
        #######################
        deploy_config = model_deploy.DeploymentConfig(
            num_clones=FLAGS.num_clones,
            clone_on_cpu=FLAGS.clone_on_cpu,
            replica_id=FLAGS.task,
            num_replicas=FLAGS.worker_replicas,
            num_ps_tasks=FLAGS.num_ps_tasks)

        # Create global_step
        with tf.device(deploy_config.variables_device()):
            global_step = slim.create_global_step()

        ######################
        # Select the dataset #
        ######################
        dataset = trainset

        ######################
        # Select the network #
        ######################
        network_fn = nets_factory.get_network_fn(
            FLAGS.model_name,
            num_classes=(dataset.num_classes - FLAGS.labels_offset),
            weight_decay=FLAGS.weight_decay,
            is_training=True)

        #####################################
        # Select the preprocessing function #
        #####################################
        preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
        image_preprocessing_fn = trainset.get_tf_preprocess_image(
            is_training=True)

        ##############################################################
        # Create a dataset provider that loads data from the dataset #
        ##############################################################
        with tf.device(deploy_config.inputs_device()):
            assert FLAGS.train_image_size is not None
            # assert FLAGS.train_image_size == network_fn.default_image_size
            train_image_size = FLAGS.train_image_size or network_fn.default_image_size

        ####################
        # Define the model #
        ####################
        def clone_fn(batch_queue):
            """Allows data parallelism by creating multiple clones of network_fn."""
            images = tf.placeholder(tf.float32,
                                    shape=(FLAGS.batch_size, train_image_size,
                                           train_image_size, 3))
            labels = tf.placeholder(tf.float32,
                                    shape=(FLAGS.batch_size,
                                           dataset.num_classes))
            trainset.set_holders(images, labels)
            logits, end_points = network_fn(
                tf.concat([
                    tf.expand_dims(
                        image_preprocessing_fn(images[i], train_image_size,
                                               train_image_size), 0)
                    for i in range(FLAGS.batch_size)
                ], 0))
            logits = tf.squeeze(logits)

            #############################
            # Specify the loss function #
            #############################
            if 'AuxLogits' in end_points:
                slim.losses.softmax_cross_entropy(
                    end_points['AuxLogits'],
                    labels,
                    label_smoothing=FLAGS.label_smoothing,
                    weights=0.4,
                    scope='aux_loss')
            slim.losses.softmax_cross_entropy(
                logits,
                labels,
                label_smoothing=FLAGS.label_smoothing,
                weights=1.0)
            return end_points

        # Gather initial summaries.
        summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

        clones = model_deploy.create_clones(deploy_config, clone_fn, [None])
        first_clone_scope = deploy_config.clone_scope(0)
        # Gather update_ops from the first clone. These contain, for example,
        # the updates for the batch_norm variables created by network_fn.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                       first_clone_scope)

        # Add summaries for end_points.
        end_points = clones[0].outputs
        for end_point in end_points:
            x = end_points[end_point]
            # summaries.add(tf.summary.histogram('activations/' + end_point, x))
            summaries.add(
                tf.summary.scalar('sparsity/' + end_point,
                                  tf.nn.zero_fraction(x)))

        # Add summaries for losses.
        for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
            summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))

        # Add summaries for variables.
        # for variable in slim.get_model_variables():
        #   summaries.add(tf.summary.histogram(variable.op.name, variable))

        #################################
        # Configure the moving averages #
        #################################
        if FLAGS.moving_average_decay:
            moving_average_variables = slim.get_model_variables()
            variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay, global_step)
        else:
            moving_average_variables, variable_averages = None, None

        #########################################
        # Configure the optimization procedure. #
        #########################################
        with tf.device(deploy_config.optimizer_device()):
            learning_rate = _configure_learning_rate(dataset.num_samples,
                                                     global_step)
            optimizer = _configure_optimizer(learning_rate)
            summaries.add(tf.summary.scalar('learning_rate', learning_rate))

        if FLAGS.sync_replicas:
            # If sync_replicas is enabled, the averaging will be done in the chief
            # queue runner.
            optimizer = tf.train.SyncReplicasOptimizer(
                opt=optimizer,
                replicas_to_aggregate=FLAGS.replicas_to_aggregate,
                total_num_replicas=FLAGS.worker_replicas,
                variable_averages=variable_averages,
                variables_to_average=moving_average_variables)
        elif FLAGS.moving_average_decay:
            # Update ops executed locally by trainer.
            update_ops.append(
                variable_averages.apply(moving_average_variables))

        # Variables to train.
        variables_to_train = _get_variables_to_train()

        #  and returns a train_tensor and summary_op
        total_loss, clones_gradients = model_deploy.optimize_clones(
            clones, optimizer, var_list=variables_to_train)
        # Add total_loss to summary.
        summaries.add(tf.summary.scalar('total_loss', total_loss))

        # Create gradient updates.
        grad_updates = optimizer.apply_gradients(clones_gradients,
                                                 global_step=global_step)
        update_ops.append(grad_updates)

        update_op = tf.group(*update_ops)
        with tf.control_dependencies([update_op]):
            train_tensor = tf.identity(total_loss, name='train_op')

        # Add the summaries from the first clone. These contain the summaries
        # created by model_fn and either optimize_clones() or _gather_clone_loss().
        summaries |= set(
            tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))

        # Merge all summaries together.
        summary_op = tf.summary.merge(list(summaries), name='summary_op')

        ###########################
        # Kicks off the training. #
        ###########################
        session_config = tf.ConfigProto()
        session_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.per_process_gpu_memory_fraction

        learning.train(
            train_tensor,
            session_config=session_config,
            train_step_fn=train_step,
            logdir=FLAGS.train_dir,
            master=FLAGS.master,
            is_chief=(FLAGS.task == 0),
            init_fn=_get_init_fn(),
            summary_op=summary_op,
            number_of_steps=FLAGS.max_number_of_steps,
            log_every_n_steps=FLAGS.log_every_n_steps,
            save_summaries_secs=FLAGS.save_summaries_secs,
            save_interval_secs=FLAGS.save_interval_secs,
            sync_optimizer=optimizer if FLAGS.sync_replicas else None)
Code Example #16
            f'(acc: {accs[best_index]:.3f}) - evaluation: {time_evaluation:.1f} s, '
            f'champion evaluation: {time_champion_evaluation:.1f} s')
        writer.add_scalar('best/reward', rewards[best_index], generation)
        writer.add_scalar('best/acc', accs[best_index], generation)

        if generation % 20 == 0:
            if 'long_training_reward' not in champion:

                # Train champion net for more epochs.
                # TODO: Do this more elegantly. Maybe make an additional
                # parameter num_epochs_long.
                long_params = params.copy()
                long_params['num_epochs'] = 10
                champion['net'].create_torch_layers(device)
                loss, acc = train(champion['net'],
                                  train_dataset,
                                  long_params,
                                  device=device)
                champion['long_training_reward'] = -get_performance_value(
                    loss, period='last_epoch')
                champion['long_training_acc'] = get_performance_value(
                    acc, period='last_epoch')

                # Evaluate this long trained net on test set.
                loss, acc = test(champion['net'],
                                 test_dataset,
                                 params,
                                 device=device)
                champion['test_reward'] = -loss
                champion['test_acc'] = acc

                # Manually delete weight matrices, so they don't block memory
Code Example #17
import torch
import torch.optim as optim

from tqdm import tqdm
import os

from model import Siamese, SuperSiamese
from dataset import TrajectoryDataset, SuperTrajectoryDataset
from learning import train, test
from utils import train_test_split

device = torch.device("cuda")
num_drivers = 500
traj_len = 64
super = 8
n_features = 17
test_split = 0.2
batch_size = 500
num_epochs = 500

# dataset = TrajectoryDataset(traj_len = traj_len, num_drivers = num_drivers, device = device)
# model = Siamese(input_size = n_features).to(device)

dataset = SuperTrajectoryDataset(traj_len = traj_len, super = super, num_drivers = num_drivers, device = device)
model = SuperSiamese(input_size = n_features, split = super).to(device)

trainloader, testloader = train_test_split(dataset, test_split, batch_size)

model = train(model, trainloader, testloader, device, num_epochs)

model.save('model.pth')
Code Example #18
from argparse import ArgumentParser
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
import utility, learning

#init parser
parser = ArgumentParser(description="Train neural networks on an image dataset!")
parser.add_argument('data_dir', type=str, help="Directory of the images")
parser.add_argument('--save_dir', type=str, default="", help="Directory to save the model (Default: Current Directory)")
parser.add_argument('--arch', type=str, default="vgg19", help="Neural Network Architecture (Default: vgg19)")
parser.add_argument('--learning_rate', type=float, default=0.001, help="Learning rate (Default: 0.001)")
parser.add_argument('--hidden_units', type=int, default=4096, help="Number of hidden units (Default: 4096)")
parser.add_argument('--epochs', type=int, default=3, help="Number of epochs (Default: 3)")
parser.add_argument('--gpu', action='store_true', help="Use GPU for training (Default: False)")
args = parser.parse_args()

#load and transform data
trainloader, validloader, testloader, class_to_idx = utility.load_transform_data(args.data_dir)

#build model, loss function, and optimizer
model = learning.build_model(args.arch, args.hidden_units, len(class_to_idx), args.gpu)
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr = args.learning_rate)

#train model
learning.train(model, criterion, optimizer, trainloader, validloader, args.epochs, args.gpu)

#save checkpoint
learning.save_model(model, args.arch, criterion, args.epochs, args.learning_rate, optimizer, class_to_idx, args.save_dir)
Code Example #19
File: main.py  Project: LucasMagnana/bikesharing
def main(args):

    cuda = False
    #  gpx_pathfindind_cycling
    with open(args.path + "files/osmnx_pathfinding_simplified.df",
              'rb') as infile:
        df_pathfinding = pickle.load(infile)
    with open(args.path + "files/gpx_matched_simplified.df", 'rb') as infile:
        df_simplified = pickle.load(infile)
    with open(args.path + "files/cluster_dbscan_custom.tab", 'rb') as infile:
        tab_clusters = pickle.load(infile)
    with open(args.path + "files/voxels_pathfinding.dict", 'rb') as infile:
        dict_voxels = pickle.load(infile)

    df = df_pathfinding

    tab_routes_voxels, _ = voxels.create_dict_vox(df, df.iloc[0]["route_num"],
                                                  df.iloc[-1]["route_num"])

    tab_routes_voxels_int = []

    df_voxels = pd.DataFrame()

    df_voxels_train = pd.DataFrame()
    df_voxels_test = pd.DataFrame()

    for i in range(len(tab_routes_voxels)):
        nb_vox = 0
        tab_routes_voxels_int.append([])
        route = tab_routes_voxels[i]
        for vox in route:
            if (nb_vox % args.voxels_frequency == 0):
                vox_str = vox.split(";")
                vox_int = [int(vox_str[0]), int(vox_str[1])]
                tab_points = voxels.get_voxel_points(vox_int)
                #points = tab_points[0][:2]+tab_points[1][:2]+tab_points[2][:2]+tab_points[3][:2]
                if vox not in dict_voxels:
                    points = [-1, i + 1]
                else:
                    points = [dict_voxels[vox]["cluster"], i + 1]
                tab_routes_voxels_int[i].append(points)
            nb_vox += 1
        '''tab_routes_voxels_int[i] = sorted(tab_routes_voxels_int[i], key=lambda k: dict_voxels[str(voxels.find_voxel_int([k[0],k[1]])[0])+";"+str(voxels.find_voxel_int([k[0],k[1]])[1])]["cyclability_coeff"])
        
        if(len(tab_routes_voxels_int[i])>50):
            tab_routes_voxels_int[i] = tab_routes_voxels_int[i][:50]
        
        print(len(tab_routes_voxels_int[i]))'''

        df_temp = pd.DataFrame(tab_routes_voxels_int[i], dtype=object)
        df_temp["route_num"] = i + 1
        df_voxels = df_voxels.append(df_temp)

        proba_test = random.random()
        if (proba_test >= args.percentage_test / 100):
            df_voxels_train = df_voxels_train.append(df_temp)
        else:
            df_voxels_test = df_voxels_test.append(df_temp)

    #print(len(df_voxels), len(df_voxels_train), len(df_voxels_test))

    df_train = df_voxels_train
    df_test = df_voxels_test

    if (len(df_test) == 0):
        df_test = df_train

    size_data = 1

    learning_rate = args.lr

    fc = NN(size_data, max(tab_clusters) + 1)
    rnn = RNN(size_data, max(tab_clusters) + 1)
    lstm = RNN_LSTM(size_data,
                    max(tab_clusters) + 1, args.hidden_size, args.num_layers)

    network = lstm

    if (cuda):
        network = network.cuda()

    optimizer = torch.optim.Adam(network.parameters(), lr=learning_rate)
    loss = nn.NLLLoss()

    tab_loss, tab_predict = learning.train(df_train, tab_clusters, loss,
                                           optimizer, network, size_data, cuda,
                                           args.num_samples, df_test)

    g_predict = learning.test(df_test, None, tab_clusters, size_data, cuda)
    print("Random:", g_predict * 100, "%")

    g_predict = learning.test(df_test, network, tab_clusters, size_data, cuda)
    print("Good predict:", g_predict * 100, "%")

    if (g_predict > 0.95):
        print("Saving network...")
        torch.save(network.state_dict(), args.path + "/files/network_temp.pt")

    plt.plot(tab_loss)
    plt.ylabel('Error')
    plt.show()

    plt.plot(tab_predict)
    plt.ylabel('Prediction')
    plt.show()
    '''import torch
Code Example #20
def main(_):
    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        ######################
        # Config model_deploy#
        ######################
        deploy_config = model_deploy.DeploymentConfig(
            num_clones=FLAGS.num_clones,
            clone_on_cpu=FLAGS.clone_on_cpu,
            replica_id=FLAGS.task,
            num_replicas=FLAGS.worker_replicas,
            num_ps_tasks=FLAGS.num_ps_tasks)

        # Create global_step
        with tf.device(deploy_config.variables_device()):
            global_step = slim.create_global_step()

        ######################
        # Select the dataset #
        ######################
        dataset = dataset_factory.get_dataset(FLAGS.dataset_name,
                                              FLAGS.dataset_split_name,
                                              FLAGS.dataset_dir)

        ####################
        # Select the network #
        ####################
        network_fn = nets_factory.get_network_fn(
            FLAGS.model_name,
            num_classes=(dataset.num_classes - FLAGS.labels_offset),
            weight_decay=FLAGS.weight_decay,
            is_training=True)

        #####################################
        # Select the preprocessing function #
        #####################################
        preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            preprocessing_name, is_training=True)

        ##############################################################
        # Create a dataset provider that loads data from the dataset #
        ##############################################################
        with tf.device(deploy_config.inputs_device()):
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=20 * FLAGS.batch_size,
                common_queue_min=10 * FLAGS.batch_size)
            [image, label] = provider.get(['image', 'label'])
            label -= FLAGS.labels_offset

            train_image_size = FLAGS.train_image_size or network_fn.default_image_size

            image = image_preprocessing_fn(image, train_image_size,
                                           train_image_size)

            images, labels = tf.train.batch(
                [image, label],
                batch_size=FLAGS.batch_size,
                num_threads=FLAGS.num_preprocessing_threads,
                capacity=5 * FLAGS.batch_size)
            labels = slim.one_hot_encoding(
                labels, dataset.num_classes - FLAGS.labels_offset)
            batch_queue = slim.prefetch_queue.prefetch_queue(
                [images, labels], capacity=2 * deploy_config.num_clones)

        ####################
        # Define the model #
        ####################
        def clone_fn(batch_queue):
            """Allows data parallelism by creating multiple clones of network_fn."""
            images, labels = batch_queue.dequeue()
            logits, end_points = network_fn(images)

            #############################
            # Specify the loss function #
            #############################
            accurancy = 0
            global logits_global
            global labels_global

            logits_global = logits
            labels_global = labels

            if 'AuxLogits' in end_points:
                print('auxlogits')
                slim.losses.softmax_cross_entropy(
                    end_points['AuxLogits'],
                    labels,
                    label_smoothing=FLAGS.label_smoothing,
                    weight=0.4,
                    scope='aux_loss')
                slim.losses.softmax_cross_entropy(
                    logits,
                    labels,
                    label_smoothing=FLAGS.label_smoothing,
                    weight=1.0)
                #predictions = tf.argmax(logits, 1)
                #accurancy =

                return end_points
            return end_points

        global logits_global
        global labels_global
        # Gather initial summaries.
        summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

        clones = model_deploy.create_clones(deploy_config, clone_fn,
                                            [batch_queue])

        print(logits_global)
        print(labels_global)

        predictions = tf.squeeze(tf.argmax(logits_global, 1))
        labels = tf.squeeze(tf.argmax(labels_global, 1))

        accurancy = 1.0 - slim.metrics.streaming_accuracy(predictions,
                                                          labels)[1]
        #accurancies = get_loss(batch_queue)
        first_clone_scope = deploy_config.clone_scope(0)
        # Gather update_ops from the first clone. These contain, for example,
        # the updates for the batch_norm variables created by network_fn.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                       first_clone_scope)

        # Add summaries for end_points.
        end_points = clones[0].outputs
        for end_point in end_points:
            x = end_points[end_point]
            summaries.add(tf.histogram_summary('activations/' + end_point, x))
            summaries.add(
                tf.scalar_summary('sparsity/' + end_point,
                                  tf.nn.zero_fraction(x)))

        # Add summaries for losses.
        for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
            summaries.add(tf.scalar_summary('losses/%s' % loss.op.name, loss))

        # Add summaries for variables.
        for variable in slim.get_model_variables():
            summaries.add(tf.histogram_summary(variable.op.name, variable))

        #################################
        # Configure the moving averages #
        #################################
        if FLAGS.moving_average_decay:
            moving_average_variables = slim.get_model_variables()
            variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay, global_step)
        else:
            moving_average_variables, variable_averages = None, None

        #########################################
        # Configure the optimization procedure. #
        #########################################
        with tf.device(deploy_config.optimizer_device()):
            learning_rate = _configure_learning_rate(dataset.num_samples,
                                                     global_step)
            optimizer = _configure_optimizer(learning_rate)
            summaries.add(
                tf.scalar_summary('learning_rate',
                                  learning_rate,
                                  name='learning_rate'))

        if FLAGS.sync_replicas:
            # If sync_replicas is enabled, the averaging will be done in the chief
            # queue runner.
            optimizer = tf.train.SyncReplicasOptimizer(
                opt=optimizer,
                replicas_to_aggregate=FLAGS.replicas_to_aggregate,
                variable_averages=variable_averages,
                variables_to_average=moving_average_variables,
                replica_id=tf.constant(FLAGS.task, tf.int32, shape=()),
                total_num_replicas=FLAGS.worker_replicas)
        elif FLAGS.moving_average_decay:
            # Update ops executed locally by trainer.
            update_ops.append(
                variable_averages.apply(moving_average_variables))

        # Variables to train.
        variables_to_train = _get_variables_to_train()

        #  and returns a train_tensor and summary_op
        total_loss, clones_gradients = model_deploy.optimize_clones(
            clones, optimizer, var_list=variables_to_train)
        # Add total_loss to summary.
        summaries.add(
            tf.scalar_summary('total_loss', total_loss, name='total_loss'))

        # Create gradient updates.
        grad_updates = optimizer.apply_gradients(clones_gradients,
                                                 global_step=global_step)
        update_ops.append(grad_updates)

        update_op = tf.group(*update_ops)
        train_tensor = control_flow_ops.with_dependencies([update_op],
                                                          total_loss,
                                                          name='train_op')

        # Add the summaries from the first clone. These contain the summaries
        # created by model_fn and either optimize_clones() or _gather_clone_loss().
        summaries |= set(
            tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))

        # Merge all summaries together.
        summary_op = tf.merge_summary(list(summaries), name='summary_op')

        ###########################
        # Kicks off the training. #
        ###########################
        loss = learning.train(
            train_tensor,
            logdir=FLAGS.train_dir,
            master=FLAGS.master,
            is_chief=(FLAGS.task == 0),
            init_fn=_get_init_fn(),
            summary_op=summary_op,
            number_of_steps=FLAGS.max_number_of_steps,
            log_every_n_steps=FLAGS.log_every_n_steps,
            save_summaries_secs=FLAGS.save_summaries_secs,
            save_interval_secs=FLAGS.save_interval_secs,
            sync_optimizer=optimizer if FLAGS.sync_replicas else None,
            accurancies=accurancy)

        print('Training loss: ' + str(loss))
Code Example #21
def main(argv):
    del argv

    # Load data.
    dataset_tools = import_module('tools.' + FLAGS.dataset)
    train_images, train_labels = dataset_tools.get_data('train')

    architecture = getattr(semisup.architectures, FLAGS.architecture)

    num_labels = dataset_tools.NUM_LABELS
    image_shape = dataset_tools.IMAGE_SHAPE

    # Sample labeled training subset.
    seed = FLAGS.sup_seed if FLAGS.sup_seed != -1 else None
    sup_by_label = semisup.sample_by_label(train_images, train_labels,
                                           FLAGS.sup_per_class, num_labels,
                                           seed)

    graph = tf.Graph()
    with graph.as_default():
        with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks,
                                                      merge_devices=True)):

            # Set up inputs.
            t_sup_images, t_sup_labels = semisup.create_per_class_inputs(
                sup_by_label, FLAGS.sup_per_batch)

            if FLAGS.remove_classes:
                t_sup_images = tf.slice(
                    t_sup_images, [0, 0, 0, 0],
                    [FLAGS.sup_per_batch * (
                        num_labels - FLAGS.remove_classes)] +
                    image_shape)

            # Resize if necessary.
            if FLAGS.new_size > 0:
                new_shape = [FLAGS.new_size, FLAGS.new_size, image_shape[-1]]
            else:
                new_shape = None

            # Apply augmentation
            if FLAGS.augmentation:
                # TODO(haeusser) generalize augmentation
                def _random_invert(inputs, _):
                    randu = tf.random_uniform(
                        shape=[FLAGS.sup_per_batch * num_labels], minval=0.,
                        maxval=1.,
                        dtype=tf.float32)
                    randu = tf.cast(tf.less(randu, 0.5), tf.float32)
                    randu = tf.expand_dims(randu, 1)
                    randu = tf.expand_dims(randu, 1)
                    randu = tf.expand_dims(randu, 1)
                    inputs = tf.cast(inputs, tf.float32)
                    return tf.abs(inputs - 255 * randu)

                augmentation_function = _random_invert
            else:
                augmentation_function = None

            # Create function that defines the network.
            model_function = partial(
                architecture,
                new_shape=new_shape,
                img_shape=image_shape,
                augmentation_function=augmentation_function,
                batch_norm_decay=FLAGS.batch_norm_decay,
                emb_size=FLAGS.emb_size)

            # Set up semisup model.
            model = semisup.SemisupModel(model_function, num_labels,
                                         image_shape)

            # Compute embeddings and logits.
            t_sup_emb = model.image_to_embedding(t_sup_images)

            t_sup_logit = model.embedding_to_logit(t_sup_emb)

            # Add losses.
            model.add_logit_loss(t_sup_logit,
                                 t_sup_labels,
                                 weight=FLAGS.logit_weight)

            variables_to_train = [v for v in tf.trainable_variables() if v.name.startswith('net/fully_connected')]
            for v in variables_to_train:
                print(v.name, v.shape)

            # Set up learning rate
            t_learning_rate = tf.maximum(
                tf.train.exponential_decay(
                    FLAGS.learning_rate,
                    model.step,
                    FLAGS.decay_steps,
                    FLAGS.decay_factor,
                    staircase=True),
                FLAGS.minimum_learning_rate)
            total_loss = tf.losses.get_total_loss()
            optimizer = tf.train.AdamOptimizer(t_learning_rate)

            variables_to_restore = None
            restore_ckpt = None
            if FLAGS.pretrained:
                variables_to_restore = [v for v in tf.trainable_variables()
                                        if not v.name.startswith('net/fully_connected')]
                restore_ckpt = FLAGS.pretrained
                for v in variables_to_restore:
                    print(v.name)
            learning.train(graph, FLAGS.logdir,
                           total_loss, optimizer,
                           variables_to_train, model.step,
                           num_steps=FLAGS.max_steps, log_interval=20,
                           summary_interval=100, snapshot_interval=5000,
                           variables_to_restore=variables_to_restore, restore_ckpt=restore_ckpt)
            return

            # Create training operation and start the actual training loop.
            train_op = model.create_train_op(t_learning_rate)

            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            # config.log_device_placement = True

            saver = tf_saver.Saver(max_to_keep=FLAGS.max_checkpoints,
                                   keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours)  # pylint:disable=line-too-long

            slim.learning.train(
                train_op,
                logdir=FLAGS.logdir + '/train',
                save_summaries_secs=FLAGS.save_summaries_secs,
                save_interval_secs=FLAGS.save_interval_secs,
                master=FLAGS.master,
                is_chief=(FLAGS.task == 0),
                startup_delay_steps=(FLAGS.task * 20),
                log_every_n_steps=FLAGS.log_every_n_steps,
                session_config=config,
                trace_every_n_steps=1000,
                saver=saver,
                number_of_steps=FLAGS.max_steps,
            )
Code Example #22
                                         milestones=[31, 40],
                                         gamma=0.1)
else:
    print("type:{}".format(val))
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=[16, 30],
                                         gamma=0.1)

# training the model
epochs = 30
isckpt = False  # if you want to load model params from checkpoint, set it to True
# print parameters
print("{}: {}, gamma: {}, nparts: {}, epochs: {}".format(
    optmeth, lr, gamma1, nparts, epochs))
model = learning.train(model,
                       dataloader,
                       criterion,
                       optimizer,
                       scheduler,
                       version=version,
                       epochs=epochs,
                       dataName=datasetname)

# print("测试集精度-----------------------")
# rsltparams = modellearning2.eval(model=model, dataloader=dataloader['test'])

#### save model
# modelpath = './models'
#modelname = "{}_parts{}-sc{}-{}--SENet50-full-rglz.model".format(datasetname, nparts, gamma1, lr)
#torch.save(model.state_dict(), save_pkl + modelname)
Code Example #23
# Option 1: Apply some random mutations to make the network deeper.
num_mutations = 0
for _ in range(num_mutations):
    net.mutate()
    #net.add_connection()
logging.info(f'Applied {num_mutations} mutations, network now has {net}')
#net.restructure_layers()
#logging.info('Restructured layers')

# Option 2: Manually add a hidden layer with 500 neurons.
# num_hidden = 500
# net.neurons_per_layer.insert(1, list(range(net.num_neurons, net.num_neurons + num_hidden)))
# net.connections = []  # erase any connections between input and output
# for from_neuron in net.neurons_per_layer[0]:  # add connections between input and hidden
#     for to_neuron in net.neurons_per_layer[1]:
#         net.connections.append([from_neuron, to_neuron])
# for from_neuron in net.neurons_per_layer[1]:  # add connections between hidden and output
#     for to_neuron in net.neurons_per_layer[2]:
#         net.connections.append([from_neuron, to_neuron])
# net.num_neurons += num_hidden
# net.reset_weights()  # need to call this manually due to manual changes to architecture
# logging.info(f'Added a hidden layer with {num_hidden} neurons, network now has {net}')

net.create_torch_layers(device)
train(net,
      train_dataset,
      params,
      device=device,
      verbose=2,
      test_dataset=test_dataset)
Code Example #24
import matplotlib.pyplot as plt
import tensorflow as tf

from learning import train, evaluate
from utils import show_images, get_config


if __name__ == "__main__":

    # reduce_gpu()
    config = get_config()

    # training
    train(g_pretrained=True, n_trainable=1, generic=False, config=config)

    # # evaluation
    # im_sr, im_hr, _, _ = evaluate(
    #     15, landscapes=True, generic=True, land_class=None)

    # show_images(im_sr, im_hr)