Code example #1
    def __init__(self, runner_id, opponent_id, gym_class, network_descriptions,
                 actor_weights, critic_weights, opponent_weights, gam, lam):
        self.runner_id = runner_id
        self.opponent_id = opponent_id
        self.gym_class = gym_class
        # Rebuild the actor network from its serialized description, then
        # restore its weights.
        actor_description = network_descriptions['actor']
        self.actor = model_factory.get_model('Actor')(
            model_factory.get_model(actor_description[0])(
                actor_description[1], actor_description[2],
                actor_description[3], actor_description[4],
                actor_description[5]))
        self.actor.set_weights(actor_weights)
        critic_description = network_descriptions['critic']
        self.critic = model_factory.get_model(critic_description[0])(
            critic_description[1], critic_description[2],
            critic_description[3])
        self.critic.set_weights(critic_weights)
        # The opponent is a second actor with its own weights; the gym
        # environment is constructed around it.
        opponent_description = network_descriptions['opponent']
        self.opponent = model_factory.get_model('Actor')(
            model_factory.get_model(opponent_description[0])(
                opponent_description[1], opponent_description[2],
                opponent_description[3], opponent_description[4],
                opponent_description[5]))
        self.opponent.set_weights(opponent_weights)
        self.gym = gym_class(self.opponent)
        # Discount factor (gam) and GAE lambda (lam).
        self.gam = gam
        self.lam = lam
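
None of the snippets here shows model_factory itself. The call sites above imply a simple name-to-class registry, where get_model('Actor') returns a wrapper around a plain network exposing Keras-style get_weights/set_weights. The sketch below is an assumption inferred from those call sites, not the real module; every name in it is hypothetical.

# Hypothetical sketch of model_factory: a plain name -> class registry.
# The 'Actor' wrapper and the registry layout are assumptions inferred
# from the call sites above, not the actual project code.
_REGISTRY = {}

def register(name):
    def decorator(cls):
        _REGISTRY[name] = cls
        return cls
    return decorator

def get_model(name):
    """Look up a model class by name, as the snippets above expect."""
    return _REGISTRY[name]

@register('Actor')
class Actor:
    def __init__(self, network):
        self.network = network  # the wrapped policy network

    def get_weights(self):
        return self.network.get_weights()

    def set_weights(self, weights):
        self.network.set_weights(weights)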
Code example #2
    def __init__(self, network_descriptions, actor_weights, critic_weights,
                 data, hyperparameters):
        self.data_keys = [
            'value_targets', 'states', 'actions', 'sampling_action_log_probs',
            'advantages'
        ]
        # PPO clipping range and training hyperparameters.
        self.epsilon = 0.2
        self.shuffle_buffer_size = 2000
        self.prefetch_buffer_size = 2000
        self.batch_size = 64
        self.critic_optimization_epochs = 20
        self.actor_optimization_epochs = 20
        self.entropy_coefficient = 0.01
        # Rebuild actor and critic from their serialized descriptions and
        # restore the weights.
        actor_description = network_descriptions['actor']
        self.actor = model_factory.get_model('Actor')(
            model_factory.get_model(actor_description[0])(
                actor_description[1], actor_description[2],
                actor_description[3], actor_description[4],
                actor_description[5]))
        self.actor.set_weights(actor_weights)
        critic_description = network_descriptions['critic']
        self.critic = model_factory.get_model(critic_description[0])(
            critic_description[1], critic_description[2],
            critic_description[3])
        self.critic.set_weights(critic_weights)
        self.dataset = self.create_datasets(data)
        # Debug loggers.
        self.critic_loss_logger = []
        self.entropy_logger = []
        self.ppo_policy_loss_logger = []
Code example #3
def get_initial_weights(network_descriptions):
    """
    Initializes a single network of each kind to gather the initial weights.
    """
    import model_factory
    actor_model = model_factory.get_model(network_descriptions['actor'][0])
    critic_model = model_factory.get_model(network_descriptions['critic'][0])
    actor = actor_model(network_descriptions['actor'][1],
                        network_descriptions['actor'][2],
                        network_descriptions['actor'][3],
                        network_descriptions['actor'][4],
                        network_descriptions['actor'][5])
    critic = critic_model(network_descriptions['critic'][1],
                          network_descriptions['critic'][2],
                          network_descriptions['critic'][3])
    actor_weights = actor.get_weights()
    critic_weights = critic.get_weights()
    return actor_weights, critic_weights
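
All three snippets above index the description lists positionally, with the class name at index 0, five constructor arguments for the actor, and three for the critic. A plausible shape for network_descriptions is therefore a dict of lists like the one below; the class names and constructor arguments are illustrative only, not taken from the project.

# Illustrative only: 'MLPPolicy' and 'MLPValue' are hypothetical names
# chosen to match the positional indexing used above.
state_dim, action_dim, hidden_units, activation, seed = 8, 4, 64, 'tanh', 0
network_descriptions = {
    'actor': ['MLPPolicy', state_dim, action_dim, hidden_units,
              activation, seed],
    'critic': ['MLPValue', state_dim, hidden_units, activation],
    'opponent': ['MLPPolicy', state_dim, action_dim, hidden_units,
                 activation, seed],
}
actor_weights, critic_weights = get_initial_weights(network_descriptions)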
Code example #4
    def __init__(self, name):
        config_data = read_file_in_dir('./', ROOT_CONFIGS_DIR + '/' + name + '.json')
        if config_data is None:
            raise Exception("Configuration file doesn't exist: ", ROOT_CONFIGS_DIR + '/' + name)

        self.__name = config_data['experiment_name']
        self.__experiment_dir = os.path.join(ROOT_STATS_DIR, self.__name)

        # Load Datasets
        self.__coco_test, self.__vocab, self.__train_loader, self.__val_loader, self.__test_loader = get_datasets(
            config_data)

        # Setup Experiment
        self.__img_root_dir = config_data['dataset']['images_root_dir']
        self.__generation_config = config_data['generation']
        self.__epochs = config_data['experiment']['num_epochs']
        self.__lr = config_data['experiment']['learning_rate']
        self.__batch_size = config_data['dataset']['batch_size']
        self.__num_gpu = config_data['num_gpu']
        self.__vocab_size = len(self.__vocab)
        self.__current_epoch = 0
        self.__training_losses = []
        self.__val_losses = []
        self.__best_loss = 1000000000000.
        self.__best_model = None  # Save your best model in this field and use this in test method.

        # Init Model
        self.__encoder = get_model(config_data, self.__vocab, network_block='encoder')
        if config_data['model']['model_type'] == 'LSTM':
            self.__decoder = get_model(config_data, self.__vocab, network_block='decoder')
        elif config_data['model']['model_type'] == 'stackedLSTM':
            self.__decoder = get_model(config_data, self.__vocab, network_block='stacked_decoder')
        elif config_data['model']['model_type'] == 'RNN':
            self.__decoder = get_model(config_data, self.__vocab, network_block='RNNdecoder')
        elif config_data['model']['model_type'] == 'stackedRNN':
            self.__decoder = get_model(config_data, self.__vocab, network_block='stacked_RNNdecoder')
        else:
            raise ValueError('must select a valid model_type')

        # TODO: Set these Criterion and Optimizers Correctly
        self.__criterion = nn.CrossEntropyLoss()
        self.__optimizer = torch.optim.Adam(filter(
            lambda p: p.requires_grad, nn.ModuleList([self.__encoder, self.__decoder]
                                                     ).parameters()), lr=self.__lr)
        # If you use SparseAdam, change nn.Embedding in model_factory to sparse=True

        self.__init_model()

        # Load Experiment Data if available
        self.__load_experiment()
Code example #5
    def __init__(self, name):
        config_data = read_file_in_dir('./', name + '.json')
        if config_data is None:
            raise Exception("Configuration file doesn't exist: ", name)

        self.__name = config_data['experiment_name']
        self.__experiment_dir = os.path.join(ROOT_STATS_DIR, self.__name)

        # Load Datasets
        self.__coco_test, self.__vocab, self.__train_loader, self.__val_loader, self.__test_loader = get_datasets(
            config_data)

        # Setup Experiment
        self.__generation_config = config_data['generation']
        self.__epochs = config_data['experiment']['num_epochs']
        self.__current_epoch = 0
        self.__training_losses = []
        self.__val_losses = []
        self.__best_model = None  # Save your best model in this field and use this in test method.

        # Init Model
        self.__model = get_model(config_data, self.__vocab)

        # TODO: Set these Criterion and Optimizers Correctly
        self.__criterion = torch.nn.CrossEntropyLoss()
        self.__optimizer = torch.optim.Adam(self.__model.parameters(), lr=0.01)
        self.__init_model()

        # Load Experiment Data if available
        self.__load_experiment()
Code example #6
File: main.py Project: FernanOrtega/encoder-decoder
def main():
    args = sys.argv[1:]

    if len(args) != 6:
        print('Wrong number of arguments')
        print('Usage (relative paths!!): main.py <dataset path> <lang> <word2vec model path>'
              ' <# folds> <model option> <output csv path>')
        sys.exit(1)

    # dir_path = os.path.dirname(os.path.realpath(__file__))
    # dataset_path = os.path.join(dir_path, args[0])
    dataset_path = args[0]
    # Each line of the dataset file is expected to be a Python literal.
    data_set = [eval(file_line) for file_line in open(dataset_path, encoding='utf-8')]
    lang = args[1]
    start = time.time()
    # w2v_path = os.path.join(dir_path, args[2])
    w2v_path = args[2]
    w2v_model = WordEmbeddings(lang, w2v_path)
    end = time.time()
    print('WV loaded', (end - start))
    n_splits = int(args[3])
    model_option = get_model(args[4])
    # output_csv_path = os.path.join(dir_path, args[5])
    output_csv_path = args[5]

    execute_experiments(data_set, w2v_model, n_splits, model_option, output_csv_path)
Code example #7
    def __init__(self, name):
        config_data = read_file_in_dir('./', name + '.json')
        if config_data is None:
            raise Exception("Configuration file doesn't exist: ", name)

        self.name = config_data['experiment_name']
        self.experiment_dir = os.path.join(ROOT_STATS_DIR, self.name)

        # Load Datasets
        self.train_loader, self.val_loader, self.test_loader = get_datasets(config_data)

        # Setup Experiment
        self.epochs = config_data['experiment']['num_epochs']
        lr = config_data['experiment']['learning_rate']
        wd = config_data['experiment']['weight_decay']
        momentum = config_data["experiment"]["momentum"]
        self.current_epoch = 0
        self.training_losses = []
        self.val_losses = []
        self.training_mean_aucs = []
        self.val_mean_aucs = []
        self.best_model = None  # Save your best model in this field and use this in test method.

        # Init Model
        self.model = get_model(config_data)

        # TODO: Set these Criterion and Optimizers Correctly
        self.criterion = torch.nn.BCEWithLogitsLoss()
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=lr, weight_decay=wd, momentum=momentum)

        self.init_model()

        # Load Experiment Data if available
        self.load_experiment()
Code example #8
    def train(self, architecture, fold, lr, batch_size, epochs, iter_size, epoch_size=None, validation_size=None,
              patience=4, optim="adam", ignore_prev_best_loss=False, cached_part=0.0, crop_central=False):
        train_loader, valid_loader, num_classes = get_loaders(batch_size,
                                                              train_transform=train_augm(),
                                                              valid_transform=valid_augm(),
                                                              n_fold=fold,
                                                              cached_part=cached_part,
                                                              crop_central=crop_central)
        # Note: this overwrites the validation_size argument.
        validation_size = len(valid_loader) * batch_size
        model = get_model(num_classes, architecture)
        # size_average=False is deprecated in recent PyTorch;
        # reduction='sum' is the modern equivalent.
        criterion = CrossEntropyLoss(size_average=False)

        self.ignore_prev_best_loss = ignore_prev_best_loss
        self.lr = lr
        self.model = model
        self.root = Path('../results/{}'.format(architecture))
        self.fold = fold
        self.optim = optim
        train_kwargs = dict(
            args=dict(iter_size=iter_size, n_epochs=epochs,
                      batch_size=batch_size, epoch_size=epoch_size),
            model=model,
            criterion=criterion,
            train_loader=train_loader,
            valid_loader=valid_loader,
            validation_size=validation_size,
            patience=patience
        )
        self._train(**train_kwargs)
Code example #9
def predict(texts, ind2class, algorithm):
    tokenizer = load_vectorizer()
    text_dataset = tokenizer.texts_to_sequences(texts)
    pad_dataset = sequence.pad_sequences(text_dataset, maxlen=max_features)
    model = model_factory.get_model(len(ind2class), algorithm, mode="predict")
    predictions = model.predict(pad_dataset)
    return predictions
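
This snippet depends on a load_vectorizer helper that restores the tokenizer fitted at training time. A minimal sketch of one plausible implementation, assuming the fitted Keras Tokenizer was pickled to disk during training; the path is made up.

# Hypothetical counterpart to load_vectorizer(); assumes the fitted
# Keras Tokenizer was pickled during training. The path is illustrative.
import pickle

def load_vectorizer(path='resources/tokenizer.pkl'):
    with open(path, 'rb') as f:
        return pickle.load(f)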
Code example #10
File: experiment.py Project: aahamed/SGL-DANN
    def __init__(self, args):
        # import pdb; pdb.set_trace()
        self.args = args
        self.name = args.save
        self.experiment_dir = os.path.join(ROOT_STATS_DIR, self.name)

        # get train dataloader for src domain
        self.src_train_queue, self.src_ul_queue, self.src_val_queue = \
                get_train_dataloaders( args.src_set, args )
        # get train dataloader for tgt domain
        self.tgt_train_queue, self.tgt_ul_queue, self.tgt_val_queue = \
                get_train_dataloaders( args.tgt_set, args )

        # get test dataloader for src domain
        self.src_test_queue = get_test_dataloaders(args.src_set, args)
        # get test dataloader for tgt domain
        self.tgt_test_queue = get_test_dataloaders(args.tgt_set, args)

        # Setup Experiment
        self.epochs = args.epochs
        self.current_epoch = 0
        self.best_model = None

        # use same criterion for label and domain loss
        self.criterion = torch.nn.NLLLoss()

        # Init Model
        self.models, self.models_pretrain = get_model(args, self.criterion,
                                                      self.criterion)

        self.optimizers, self.optimizers_pretrain = \
                get_optimizers( args, self.models, self.models_pretrain )
        self.schedulers, self.schedulers_pretrain = \
                get_schedulers( args, self.optimizers, self.optimizers_pretrain )

        # learner group for weights V_k
        self.sgl_pretrain = SGL(self.models_pretrain, self.optimizers_pretrain,
                                self.schedulers_pretrain, self.criterion,
                                self.criterion, self.experiment_dir, self.args)
        # learner group for weights W_k
        self.sgl = SGL(self.models, self.optimizers, self.schedulers,
                       self.criterion, self.criterion, self.experiment_dir,
                       self.args)
        # architect uses weights W_k
        self.architect = ArchitectDA(self.sgl, args)

        # stats
        self.pretrain_stats = SGLStats(self.sgl_pretrain, 'pretrain',
                                       self.experiment_dir)
        self.train_stats = SGLStats(self.sgl, 'train', self.experiment_dir)
        self.val_stats = SGLStats(self.sgl, 'validation', self.experiment_dir)
        self.final_train_stats = SGLStats(self.sgl, 'final_train',
                                          self.experiment_dir)
        self.test_stats = SGLStats(self.sgl, 'test', self.experiment_dir)

        self.init_model()

        # Load Experiment Data if available
        self.load_experiment()
Code example #11
def main(architecture, folds, tta):
    test_dataset = InternValidDataset(transform=test_augm())
    labels = None
    for fold in folds:
        model = get_model(num_classes=test_dataset.num_classes,
                          architecture=architecture)
        state = torch.load('../results/{}/best-model_{}.pt'.format(
            architecture, fold))
        model.load_state_dict(state['model'])
        model.eval()
        labels = []
        with open('../results/{}/{}_valid_prob.csv'.format(architecture, fold),
                  "w") as f:
            for idx in tqdm.tqdm(range(len(test_dataset))):
                best_conf = 0
                best_pred = None
                for rot in range(4):
                    test_dataset.rot = rot
                    in1 = []
                    in2 = []
                    for _ in range(tta):
                        x = test_dataset[idx][0]
                        in1.append(x[0])
                        in2.append(x[1])
                    in1 = variable(torch.stack(in1))
                    in2 = variable(torch.stack(in2))
                    pred = model(in1, in2).data.cpu().numpy()
                    pred = np.array([softmax(x) for x in pred])
                    pred = np.sum(pred, axis=0) / len(pred)
                    if np.max(pred) > best_conf:
                        best_conf = np.max(pred)
                        best_pred = pred
                labels.append(test_dataset[idx][1])
                probas = ','.join([str(x) for x in best_pred])
                f.write('{}\n'.format(probas))

    dfs = [
        pd.read_csv('../results/{}/{}_valid_prob.csv'.format(architecture, i),
                    header=None) for i in folds
    ]
    classes = [
        'HTC-1-M7', 'LG-Nexus-5x', 'Motorola-Droid-Maxx', 'Motorola-Nexus-6',
        'Motorola-X', 'Samsung-Galaxy-Note3', 'Samsung-Galaxy-S4',
        'Sony-NEX-7', 'iPhone-4s', 'iPhone-6'
    ]
    for df in dfs:
        df.columns = classes
    df = dfs[0].copy()
    for i in np.arange(1, len(folds)):
        df[classes] += dfs[i][classes]
    df[classes] /= len(folds)
    matched = 0
    for i in np.arange(len(test_dataset)):
        pred = df[classes].iloc[i].values.argmax()
        real = labels[i]
        if pred == real:
            matched += 1
    print('accuracy = {}'.format(matched / len(test_dataset)))
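
This snippet calls a softmax helper it never defines. Code example #15 below contains a matching numerically stable definition; it is reproduced here so the snippet stands alone.

import numpy as np

def softmax(x):
    """Compute softmax values for each set of scores in x."""
    e_x = np.exp(x - np.max(x))  # subtract the max for numerical stability
    return e_x / e_x.sum(axis=0)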
Code example #12
    def __init__(self, name, instance_name=None):
        config_data = read_file_in_dir('./config/', name + '.json')
        if config_data is None:
            raise Exception("Configuration file doesn't exist: ", name)

        # Load Datasets
        if instance_name is not None:
            self.name = instance_name
        else:
            self.name = config_data['experiment_name']
        self.experiment_dir = os.path.join(ROOT_STATS_DIR, self.name)

        ds_train, ds_val = get_datasets(config_data)
        self.train_loader = DataLoader(
            ds_train,
            batch_size=config_data['experiment']['batch_size_train'],
            shuffle=True,
            num_workers=config_data['experiment']['num_workers'],
            pin_memory=True)
        self.val_loader = DataLoader(
            ds_val,
            batch_size=config_data['experiment']['batch_size_val'],
            shuffle=True,
            num_workers=config_data['experiment']['num_workers'],
            pin_memory=True)

        ds_test = get_test_dataset(config_data)
        self.test_loader = DataLoader(
            ds_test,
            batch_size=1,
            num_workers=config_data['experiment']['num_workers'],
            pin_memory=True)

        # Setup Experiment Stats
        self.epochs = config_data['experiment']['num_epochs']
        self.current_epoch = 0
        self.training_losses = []
        self.val_losses = []
        self.val_dices = []
        self.ed_dices = []
        self.es_dices = []

        # Init Model and Criterion
        self.criterion = get_criterion(config_data)
        self.model = get_model(config_data)
        self.optimizer = torch.optim.Adam(
            self.model.parameters(),
            lr=config_data['experiment']['learning_rate'])
        self.init_model()
        self.ensemble = config_data['model']['ensemble']

        # Load Experiment Data if available
        self.load_experiment()
        self.log(str(config_data))
Code example #13
File: model.py Project: cortwave/cdiscount-kaggle
    def train(self,
              architecture,
              fold,
              lr,
              batch_size,
              epochs,
              epoch_size,
              validation_size,
              iter_size,
              patience=4,
              optim="adam",
              ignore_prev_best_loss=False):
        print("Start training with following params:",
              f"architecture = {architecture}", f"fold = {fold}", f"lr = {lr}",
              f"batch_size = {batch_size}", f"epochs = {epochs}",
              f"epoch_size = {epoch_size}",
              f"validation_size = {validation_size}",
              f"iter_size = {iter_size}", f"optim = {optim}",
              f"patience = {patience}")

        train_loader, valid_loader, num_classes = get_loaders(
            batch_size,
            train_transform=train_augm(),
            valid_transform=valid_augm(),
            n_fold=fold)
        model = get_model(num_classes, architecture)
        criterion = CrossEntropyLoss(size_average=False)

        self.ignore_prev_best_loss = ignore_prev_best_loss
        self.lr = lr
        self.model = model
        self.root = Path(f"../results/{architecture}")
        self.fold = fold
        self.optim = optim
        self.train_loss_logger = VisdomPlotLogger('line',
                                                  opts={'title': 'Train Loss'})
        self.lr_logger = VisdomPlotLogger(
            'line', opts={'title': 'Train Learning Rate'})
        self.test_loss_logger = VisdomPlotLogger('line',
                                                 opts={'title': 'Test Loss'})
        self.test_accuracy_logger = VisdomPlotLogger(
            'line', opts={'title': 'Test Accuracy'})
        train_kwargs = dict(args=dict(iter_size=iter_size,
                                      n_epochs=epochs,
                                      batch_size=batch_size,
                                      epoch_size=epoch_size),
                            model=model,
                            criterion=criterion,
                            train_loader=train_loader,
                            valid_loader=valid_loader,
                            validation_size=validation_size,
                            patience=patience)
        self._train(**train_kwargs)
Code example #14
def get_essentials():
    print("loading the model")
    model = model_factory.get_model(args)
    # load weights
    if args.model_file != 'none':
        model.load(args.model_file)
        print("model loaded")

    print("getting the data providers")
    test_data = data_provider_factory.get_data_providers(args,
                                                         rng,
                                                         test_set=True)

    return model, test_data
Code example #15
    def predict(self, architecture, fold, tta=5, mode='submit', name="sub"):
        test_dataset = TestDataset(transform=test_augm())
        model = get_model(num_classes=test_dataset.num_classes, architecture=architecture)
        state = torch.load('../results/{}/best-model_{}.pt'.format(architecture, fold))
        model.load_state_dict(state['model'])
        model.eval()
        if mode == 'submit':
            with open('../results/{}/{}_{}.csv'.format(architecture, name, fold), "w") as f:
                f.write("fname,camera\n")
                for idx in tqdm.tqdm(range(len(test_dataset))):
                    images = torch.stack([test_dataset[idx][0] for _ in range(tta)])
                    images = variable(images)
                    pred = model(images).data.cpu().numpy()
                    pred = np.sum(pred, axis=0)
                    fname = test_dataset[idx][1]
                    label = np.argmax(pred, 0)
                    camera_model = test_dataset.inverse_dict[label]
                    f.write('{},{}\n'.format(fname, camera_model))
        else:
            def softmax(x):
                """Compute softmax values for each sets of scores in x."""
                e_x = np.exp(x - np.max(x))
                return e_x / e_x.sum(axis=0)

            with open('../results/{}/{}_{}_prob.csv'.format(architecture, name, fold), "w") as f:
                for idx in tqdm.tqdm(range(len(test_dataset))):
                    best_conf = 0
                    best_pred = None
                    for rot in range(4):
                        test_dataset.rot = rot
                        in1 = []
                        in2 = []
                        for _ in range(tta):
                            x = test_dataset[idx][0]
                            in1.append(x[0])
                            in2.append(x[1])
                        in1 = variable(torch.stack(in1))
                        in2 = variable(torch.stack(in2))
                        fname = test_dataset[idx][1]
                        pred = model(in1, in2).data.cpu().numpy()
                        pred = np.array([softmax(x) for x in pred])
                        pred = np.sum(pred, axis=0) / len(pred)
                        if np.max(pred) > best_conf:
                            best_conf = np.max(pred)
                            best_pred = pred
                    probas = ','.join([str(x) for x in best_pred])
                    f.write('{},{}\n'.format(fname, probas))
Code example #16
def train(train_file, algorithm):
    X_train, y_train, ind_to_class = extract_features(train_file)
    model = model_factory.get_model(len(ind_to_class), algorithm)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    early_stopping = EarlyStopping(patience=3, verbose=1)
    checkpointer = ModelCheckpoint(
        filepath=resource_dir_path + "/models/checkpoints/" + algorithm + "-" +
        "{epoch:02d}-" + str(int(round(time.time() * 1000))) + ".hdf5",
        verbose=1,
        save_best_only=True)
    model.fit(X_train,
              np_utils.to_categorical(y_train, len(ind_to_class)),
              validation_split=0.2,
              verbose=2,
              callbacks=[checkpointer, early_stopping])
Code example #17
File: model.py Project: cortwave/cdiscount-kaggle
    def predict_validation(self, architecture, fold, tta, batch_size):
        n_classes = 5270
        model = get_model(num_classes=n_classes, architecture=architecture)
        state = torch.load(f"../results/{architecture}/best-model_{fold}.pt")
        model.load_state_dict(state['model'])
        model.eval()  # switch to inference mode, as the other predict snippets do
        test_augm = valid_augm()
        label_map = pd.read_csv("../data/labels_map.csv")
        label_map.index = label_map['label_id']
        loader = get_valid_loader(fold, batch_size, test_augm)
        with open(f"../results/{architecture}/validation_{fold}.csv",
                  "w") as f:
            f.write("_id,category_id\n")
            for images, product_ids in tqdm.tqdm(loader):
                images = variable(images)
                preds = model(images).data.cpu().numpy()
                for pred, product_id in zip(preds, product_ids):
                    label = np.argmax(pred, 0)
                    # .ix was removed from pandas; .loc is the replacement.
                    cat_id = label_map.loc[label, 'category_id']
                    f.write(f"{product_id},{cat_id}\n")
Code example #18
File: experiment.py Project: Krishna14/CSE251B
    def __init__(self, name):
        config_data = read_file_in_dir('./', name + '.json')
        if config_data is None:
            raise Exception("Configuration file doesn't exist: ", name)

        self.__name = config_data['experiment_name']
        self.__experiment_dir = os.path.join(ROOT_STATS_DIR, self.__name)

        # Load Datasets
        self.__coco_test, self.__vocab, self.__train_loader, self.__val_loader, self.__test_loader = get_datasets(
            config_data)

        # Setup Experiment
        self.__generation_config = config_data['generation']
        self.__epochs = config_data['experiment']['num_epochs']
        self.__learning_rate = config_data['experiment']['learning_rate']
        self.__early_stop_threshold = config_data['experiment'][
            'early_stop_threshold']
        self.__test_caption_path = config_data['dataset'][
            'test_annotation_file_path']
        self.__max_caption_count = config_data['generation']['max_length']
        self.__current_epoch = 0
        self.__training_losses = []
        self.__val_losses = []
        self.__best_model = None  # Save your best model in this field and use this in test method.
        self.__best_encoder_model = None  # Save the best encoder model here
        self.__best_decoder_model = None  # Save the best decoder model here

        # Init Model
        self.__encoder_model, self.__decoder_model = get_model(
            config_data, self.__vocab)
        # TODO: Set these Criterion and Optimizers Correctly
        self.__criterion = nn.CrossEntropyLoss()
        # Note: encoder_model.batchNorm parameters are already included in
        # encoder_model.parameters(), so they appear twice in this list.
        parameters = list(self.__decoder_model.parameters()) + list(
            self.__encoder_model.parameters()) + list(
                self.__encoder_model.batchNorm.parameters())
        self.__optimizer = optim.Adam(parameters, lr=self.__learning_rate)
        self.__MODEL_NAME = self.__name + '_' + str(self.__learning_rate) + \
            '_' + str(self.__epochs) + '_' + \
            str(config_data['model']['embedding_size']) + '_' + \
            str(config_data['model']['hidden_size'])

        self.__use_gpu = False
        self.__init_model()
Code example #19
File: experiment.py Project: jboboyle/SGL-DANN
    def __init__(self, args):
        self.args = args
        self.name = args.save
        self.experiment_dir = os.path.join(ROOT_STATS_DIR, self.name)

        # Load Datasets
        self.train_queue, self.ul_queue, self.val_queue = \
                get_dataloaders( args )

        # Setup Experiment
        self.epochs = args.epochs
        self.current_epoch = 0
        self.training_losses = []
        self.val_losses = []
        self.best_model = None

        # Init Model
        self.models, self.models_pretrain = get_model(args)

        self.criterion = torch.nn.CrossEntropyLoss()
        self.optimizers, self.optimizers_pretrain = \
                get_optimizers( args, self.models, self.models_pretrain )
        self.schedulers, self.schedulers_pretrain = \
                get_schedulers( args, self.optimizers, self.optimizers_pretrain )

        # learner group for weights V_k
        self.sgl_pretrain = SGL(self.models_pretrain, self.optimizers_pretrain,
                                self.schedulers_pretrain, self.criterion,
                                self.experiment_dir, self.args)
        # learner group for weights W_k
        self.sgl = SGL(self.models, self.optimizers, self.schedulers,
                       self.criterion, self.experiment_dir, self.args)
        # architect uses weights W_k
        self.architect = ArchitectSGL(self.sgl, args)

        # stats
        self.train_stats = SGLStats(self.sgl, 'train', self.experiment_dir)
        self.val_stats = SGLStats(self.sgl, 'validation', self.experiment_dir)

        self.init_model()

        # Load Experiment Data if available
        self.load_experiment()
Code example #20
def main():
    data_dir, save_dir, arch, learning_rate, hidden_units, epochs, gpu = get_input_args()
    
    trainloader = load_data.get_trainloader(data_dir)
    validationloader = load_data.get_validationloader(data_dir)

    model = model_factory.get_model(arch, hidden_units)
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), learning_rate)

    train(model, trainloader, validationloader, optimizer, criterion, epochs, gpu)

    model_factory.save_trained_model(
        model, 
        arch, 
        optimizer,
        load_data.get_train_class_to_idx(data_dir), 
        hidden_units,
        epochs,
        learning_rate,
        save_dir)
Code example #21
File: experiment.py Project: y8han/UCSD_CSE251B
    def __init__(self, name):
        config_data = read_file_in_dir('./', name + '.json')
        if config_data is None:
            raise Exception("Configuration file doesn't exist: ", name)
        self.__name = config_data['experiment_name']
        self.__data_path = config_data['dataset']['data_location']
        self.__experiment_dir = os.path.join('./experiment_data', self.__name)

        # Load Datasets
        self.__train_loader, self.__test_loader = get_datasets(config_data)

        # Setup Experiment
        self.__epochs = config_data['experiment']['num_epochs']
        self.__current_epoch = 0
        self.__training_losses = []
        self.__val_losses = []
        self.__best_model = None  # Save your best model in this field and use this in test method.

        # Init Model
        self.__model = get_model(config_data)
        self.__load_experiment()
Code example #22
File: model.py Project: cortwave/cdiscount-kaggle
    def predict(self, architecture, fold, tta, batch_size, name="sub"):
        print("Start predicting with following params:",
              f"architecture = {architecture}", f"fold = {fold}",
              f"tta = {tta}")
        n_classes = 5270
        model = get_model(num_classes=n_classes, architecture=architecture)
        state = torch.load(f"../results/{architecture}/best-model_{fold}.pt")
        model.load_state_dict(state['model'])
        model.eval()  # switch to inference mode
        test_augm = valid_augm()
        label_map = pd.read_csv("../data/labels_map.csv")
        label_map.index = label_map['label_id']
        test_dataset = TestDataset(transform=test_augm)
        with open(f"../results/{architecture}/{name}_{fold}.csv", "w") as f:
            f.write("_id,category_id\n")
            for idx in tqdm.tqdm(range(len(test_dataset))):
                images = torch.stack(
                    [test_dataset[idx][0] for i in range(tta)])
                images = variable(images)
                pred = model(images).data.cpu().numpy()
                pred = sum(pred)
                product_id = test_dataset[idx][1]
                label = np.argmax(pred, 0)
                # .ix was removed from pandas; .loc is the replacement.
                cat_id = label_map.loc[label, 'category_id']
                f.write(f"{product_id},{cat_id}\n")
Code example #23
def build_model(args):
    return model_factory.get_model(args)
Code example #24
import torch

from config import Config

from dataset import create_test_loader
from model_factory import get_model
from meanteacher import Tester

from model_utils import save_checkpoint, load_checkpoint

if __name__ == "__main__":
    cfg = Config()
    cfg.device = torch.device("cuda" if cfg.device_ids != "cpu" else "cpu")

    # dataset
    eval_loader = create_test_loader(cfg.data_dir, cfg)

    # create model
    model = get_model(cfg.model_arch, pretrained=cfg.pretrained)
    ema_model = get_model(cfg.model_arch, pretrained=cfg.pretrained, ema=True)

    # resume training / load trained weights
    last_epoch = 0
    if cfg.resume:
        model, ema_model, optimizer, last_epoch = load_checkpoint(
            model, ema_model, cfg.resume)

    # create trainer
    tester = Tester(cfg, model, ema_model)
    tester._set_device(cfg.device)

    results = tester(eval_loader)
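
load_checkpoint is imported from model_utils but not shown here. A minimal sketch matching the four-value unpacking above, assuming the checkpoint dict stores the model, EMA model, and optimizer state under the key names below; those names are guesses.

# Hypothetical shape for model_utils.load_checkpoint, matching the
# four-value unpacking above. The checkpoint key names are assumptions.
import torch

def load_checkpoint(model, ema_model, ckpt_path):
    ckpt = torch.load(ckpt_path, map_location='cpu')
    model.load_state_dict(ckpt['model'])
    ema_model.load_state_dict(ckpt['ema_model'])
    return model, ema_model, ckpt.get('optimizer'), ckpt.get('epoch', 0)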
Code example #25
                            default=1.0e-4)
        parser.add_argument('-m',
                            help='model',
                            dest='model',
                            type=str,
                            default="chauffeur")
        args = parser.parse_args()

        # 'rainy_foggy_automold', 'rainy_foggy_iaa', 'exact_rotate'
        transformation_mode = 'rainy_foggy_automold'
        path_train_embed_and_neighbor_acc = folder + 'natural_train_embed_and_neighbor_acc_' + args.model + '_' + transformation_mode + '_' + str(
            neighbor_num) + '_x4'
        path_test_embed_and_neighbor_acc = folder + 'natural_test_embed_and_neighbor_acc_' + args.model + '_' + transformation_mode + '_x4'
        args.data_dir = '/home/zhongzzy9/Documents/self-driving car/misbehavior_prediction/datasets/dataset5'
        data = utils_train_self_driving_car.load_data(args)
        model = model_factory.get_model(args)
        assert model is not None
        print(model.summary())

        embed_train, y_train, y_pred_train, mean_l1_dist_train = get_embed(
            model, 'train', neighbor_num, transformation_mode)
        transformed_embed_train = embed_train
        # if model in ['epoch', 'chauffeur']:
        #     from sklearn.decomposition import PCA
        #     pca = PCA(n_components=300)
        #     pca.fit(embed_train)
        #     print('explained_variance_ratio_',np.sum(pca.explained_variance_ratio_))
        #     print(embed_train.shape)
        #     transformed_embed_train = pca.transform(embed_train)
        #     print(transformed_embed_train.shape)
        # else:
Code example #26
def train():
    """
    Load train/validation data set and train the model
    """
    parser = argparse.ArgumentParser(
        description='Behavioral Cloning Training Program')
    parser.add_argument('-d',
                        help='data directory',
                        dest='data_dir',
                        type=str,
                        default='../datasets/dataset5/')
    parser.add_argument('-t',
                        help='test size fraction',
                        dest='test_size',
                        type=float,
                        default=0.2)
    parser.add_argument('-k',
                        help='drop out probability',
                        dest='keep_prob',
                        type=float,
                        default=0.5)
    parser.add_argument('-n',
                        help='number of epochs',
                        dest='nb_epoch',
                        type=int,
                        default=500)
    parser.add_argument('-s',
                        help='samples per epoch',
                        dest='samples_per_epoch',
                        type=int,
                        default=100)
    parser.add_argument('-b',
                        help='batch size',
                        dest='batch_size',
                        type=int,
                        default=256)
    parser.add_argument('-o',
                        help='save best models only',
                        dest='save_best_only',
                        type=s2b,
                        default='true')
    parser.add_argument('-l',
                        help='learning rate',
                        dest='learning_rate',
                        type=float,
                        default=1.0e-4)
    parser.add_argument('-sl',
                        help='sequence length',
                        dest='sequence_length',
                        type=int,
                        default=3)
    parser.add_argument('-m',
                        help='model',
                        dest='model',
                        type=str,
                        default="chauffeur")
    args = parser.parse_args()

    print('-' * 30)
    print('Parameters')
    print('-' * 30)
    for key, value in vars(args).items():
        print('{:<20} := {}'.format(key, value))
    print('-' * 30)

    # TBD: change to relative path
    args.data_dir = '/home/zhongzzy9/Documents/self-driving car/misbehavior_prediction/datasets/dataset5'
    data = utils_train_self_driving_car.load_data(args)
    model = model_factory.get_model(args)
    assert model is not None
    train_model(model, args, *data)
Code example #27
File: train.py Project: MicheleXie0825/GithubLocal
import os
import time

import torch

from config import get_config
from model_factory import get_model
from datasets_factory import get_dataloaders
from tools import get_optimizer, get_scheduler
from tools import get_dir_name, get_log_name

args, _ = get_config()
log_name = get_log_name(args)
log_dir = get_dir_name(args.out_dir,
                       log_name)
if os.path.exists(log_dir):
    print(f"Log dir {log_dir} already exists. It will be overwritten.")
else:
    os.makedirs(log_dir)

model = get_model(args.model_architecture).to(args.device)

criterion = torch.nn.CrossEntropyLoss()
optimizer = get_optimizer(model,
                          args.optimizer,
                          args.learning_rate,
                          args.momentum,
                          args.weight_decay,
                          amsgrad=args.amsgrad)
scheduler = get_scheduler(optimizer, args.decay_step)

# model_save_path = os.path.join(log_dir, "model.pt")
# result_save_path = os.path.join(log_dir, "result.txt")

best_val_acc = 0.0
since = epoch_time_stamp = time.time()