Example #1
0
    def run_increment_mode(self):
        """
        Incremental training mode.

        Iterates over DATASETS; on every round after the first, the model
        produced by the previous round is evaluated before being refreshed
        on the current sample.
        """
        current_model = None
        for round_no, dataset in enumerate(DATASETS):
            # STEP1: accuracy result — skipped on the first round,
            # since no model exists yet
            if round_no:
                self.test(models_name=current_model,
                          input_name=dataset,
                          output_name=self.output_test)

            # STEP2: call mapreduce, generate raw H and D
            mapreduce_out = dataset + "out"
            raw_result = self.mapreduce(sample_name=dataset,
                                        output_name=mapreduce_out,
                                        is_increment=True)

            # STEP3: generate incremented model
            current_model = generate_model(input_filename=raw_result,
                                           output_filename=raw_result + "2",
                                           w_matrix_filename=self.w_matrix_filename,
                                           num=self.num,
                                           v=self.v,
                                           is_increment=True,
                                           a_inc=float(A_INC))
Example #2
0
    def run_increment_mode(self):
        """
        Run training in incremental mode over every dataset in DATASETS.

        Per dataset: evaluate the previous round's model (skipped on the
        first round), run mapreduce to produce raw H and D, then build an
        incremented model from the result.
        """
        model_name = None
        for cnt, sample_name in enumerate(DATASETS):
            # STEP1: accuracy result from the previous round's model
            if cnt > 0:
                self.test(models_name=model_name,
                          input_name=sample_name,
                          output_name=self.output_test)

            # STEP2: call mapreduce, generate raw H and D
            result_name = self.mapreduce(sample_name=sample_name,
                                         output_name=sample_name + "out")

            # STEP3: generate incremented model
            model_name = generate_model(input_filename=result_name,
                                        output_filename=result_name + "2",
                                        w_matrix_filename=self.w_matrix_filename,
                                        num=self.num,
                                        v=self.v,
                                        is_increment=True,
                                        a_inc=float(A_INC))
Example #3
0
    def mapreduce(self, sample_name, output_name, is_increment=False):
        """
        Run the whole training mapreduce pipeline for one sample.

        Pipeline: elsvm mapreduce -> model file generation -> testsvm
        step1 (cnt and means) -> testsvm step2 (re-labelled dataset) ->
        final eelsvm mapreduce.

        :param sample_name: base name of the input sample; each step
            derives its own output name from it
        :param output_name: kept for interface compatibility; unused by
            the visible pipeline body
        :param is_increment: forwarded to generate_model for incremental mode
        :return: name of the final eelsvm mapreduce result
        """
        # FIX: the original used Python 2-only `print x` statements, which
        # are a SyntaxError under Python 3.  The single-argument
        # parenthesized form below behaves identically under both.
        # step1: elsvm mapreduce
        print("*" * 40)
        print("step1: elsvm mapreduce")
        output_name_step1 = sample_name + "_step1"
        result_name = self.elsvm_mapreduce(sample_name=sample_name,
                                           output_name=output_name_step1)

        print("^" * 40)
        # step2: generate model data file
        print("*" * 40)
        print("step2: generate model data file")
        model_name = generate_model(input_filename=result_name,
                                    output_filename=result_name + "2",
                                    w_matrix_filename=self.w_matrix_filename,
                                    num=self.num,
                                    v=self.v,
                                    is_increment=is_increment,
                                    a_inc=float(A_INC))

        print("^" * 40)
        # step3: testsvm_step1.py for cnt and means
        print("step3: testsvm_step1.py for cnt and means")
        print("*" * 40)
        output_testsvm_step1 = sample_name + "_testsvm_step1"
        result_argument = self.testsvm_step1(models_name=model_name,
                                             sample_name=sample_name,
                                             output_name=output_testsvm_step1)

        print("^" * 40)
        # step4: testsvm_step2.py for another dataset
        print("step4: testsvm_step2.py for another dataset")
        print("*" * 40)
        output_testsvm_step2 = sample_name + "_testsvm_step2"
        self.testsvm_step2(models_name=model_name,
                           sample_name=sample_name,
                           output_name=output_testsvm_step2)

        print("^" * 40)
        # step5: eelsvm mapreduce — consumes step4's relabelled output and
        # the argument file produced by step3
        print("step5: eelsvm mapreduce")
        print("*" * 40)
        output_name_final = sample_name + "_final_step"
        result_final = self.eelsvm_mapreduce(sample_name=output_testsvm_step2,
                                             output_name=output_name_final,
                                             models_name=result_argument)

        print("^" * 40)
        return result_final
Example #4
0
def main():
    """Parse CLI args, build the model, and run image generation."""
    args = parse_args()
    config = Config(args)

    # Ensure the output directory exists before any files are written.
    os.makedirs(config.output_dir, exist_ok=True)

    # Instantiate the network described by the configuration.
    model = models.generate_model(config.model)

    # Load the content image (always resized) and the style image
    # (resized unless --no-resize-style was given).
    img_orig = load_image(config.original_image,
                          [config.width, config.height])
    if config.no_resize_style:
        img_style = load_image(config.style_image, None)
    else:
        img_style = load_image(config.style_image,
                               [config.width, config.height])

    # Run generation and write the results.
    generator = models.Generator(model, img_orig, img_style, config)
    generator.generate(config)
Example #5
0
    def run_single_mode(self):
        """
        Classic one-shot training: mapreduce, model generation, then a
        single accuracy test on the same sample.
        """
        # STEP1: map-reduce
        mapreduce_result = self.mapreduce(sample_name=self.sample_name,
                                          output_name=self.output_name)

        # STEP2: generate model
        model_file = generate_model(input_filename=mapreduce_result,
                                    output_filename=mapreduce_result + "2",
                                    w_matrix_filename=self.w_matrix_filename,
                                    num=self.num)

        # STEP3: test for mapreduce
        self.test(models_name=model_file,
                  input_name=self.sample_name,
                  output_name=self.output_test)
Example #6
0
    def run_single_mode(self):
        """
        Run the traditional (non-incremental) pipeline once:
        mapreduce -> model generation -> accuracy test.
        """
        # STEP1: map-reduce over the configured sample
        raw_output = self.mapreduce(sample_name=self.sample_name,
                                    output_name=self.output_name)

        # STEP2: build the model file from the mapreduce output
        trained_model = generate_model(input_filename=raw_output,
                                       output_filename=raw_output + "2",
                                       w_matrix_filename=self.w_matrix_filename,
                                       num=self.num)

        # STEP3: evaluate the freshly built model on the same sample
        self.test(models_name=trained_model,
                  input_name=self.sample_name,
                  output_name=self.output_test)
Example #7
0
def build_model(opt, phase):
    """Create the network for *phase*, move it to GPU, and wrap it in
    DataParallel.

    Training uses ``opt.gpu_num`` GPUs and optionally loads pretrained
    weights from ``opt.pretrain_path``; any other recognized phase
    ('test') runs on a single GPU.

    :param opt: options object (expects n_classes, gpu_num, pretrain_path)
    :param phase: 'train' or 'test'
    :return: the DataParallel-wrapped model, or None when *phase* is
        unrecognized (original silent-failure behavior preserved).
    """
    # FIX: idiomatic membership test instead of a double inequality chain;
    # explicit `return None`; dropped the unused local `num_classes`
    # (it was only referenced from commented-out dead code).
    if phase not in ("train", "test"):
        print("Error: Phase not recognized")
        return None

    model = generate_model(opt)
    print(model)

    # Pretrained weights are loaded only for training, and only when a
    # checkpoint path was supplied.
    if phase == 'train' and opt.pretrain_path:
        model.load_weights(opt.pretrain_path)

    model = model.cuda()
    if phase == 'train':
        model = nn.DataParallel(model, device_ids=range(opt.gpu_num))
    else:
        # Inference always runs on a single device.
        model = nn.DataParallel(model, device_ids=range(1))

    return model
Example #8
0
    # Use the batch generator. This outputs (train_images, val_images)
    gen = generate_images(directory=str(data_path),
                          batch_size=BATCH_SIZE,
                          labels=CLASS_LABELS,
                          shuffle=True,
                          target_size=(IMAGE_WIDTH, IMAGE_HEIGHT,
                                       NUM_CHANNELS),
                          validation_split=0.3)

    # Example generation. This generates a single batch of training and validation images
    batch_train, label_train, batch_val, label_val = next(gen)

    # Adadelta Model

    # NOTE(review): NUM_LABELS is hard-coded to 100 here — confirm it
    # matches len(CLASS_LABELS) passed to the generator above.
    model = generate_model('Adadelta', NUM_LABELS=100)
    print(model.summary())

    # Train and record the loss history for the Adadelta-optimized model.
    history = train_model(model, 'Adadelta')

    # Plot training vs. validation loss and save the figure to disk.
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.xlabel('Epoch')
    plt.ylabel('loss')
    plt.legend(loc='upper right')
    plt.savefig('adadelta_plot.png')

    # Evaluate on the single validation batch drawn above.
    loss, acc = model.evaluate(batch_val, label_val, verbose=2)
    print(f"Adadelta : Loss = {loss}, Validation Accuracy = {acc}")

    # RMSprop Model
Example #9
0
    
    # Define parameters for batch generation
    # NOTE(review): BATCH_SIZE equals the full dataset size, so a single
    # batch covers every image — confirm that is intentional.
    BATCH_SIZE = len(image_data)
    IMAGE_WIDTH, IMAGE_HEIGHT = 128, 128
    NUM_CHANNELS = 3 # RGB Image has 3 channels

    # Use the batch generator. This outputs (train_images, val_images)
    gen = generate_images(directory=str(data_path), batch_size=BATCH_SIZE,
                            labels=CLASS_LABELS,
                            shuffle=True, target_size=(IMAGE_WIDTH, IMAGE_HEIGHT, NUM_CHANNELS),
                            validation_split=0.3)
    
    # NOTE(review): `args.train == True` is redundant — `if args.train:`
    # reads better and behaves the same for a boolean flag.
    if args.train == True:
        # Draw one batch of training and validation data.
        batch_train, label_train, batch_val, label_val = next(gen)
        
        # Build, train, and plot the Adadelta-optimized model.
        adadelta_model = generate_model('Adadelta')
        history = train_model(adadelta_model, 'Adadelta')

        plt.plot(history.history['loss'], label='loss')
        plt.plot(history.history['val_loss'], label = 'val_loss')
        plt.xlabel('Epoch')
        plt.ylabel('loss')
        plt.legend(loc='upper right')
        plt.savefig('adadelta_plot.png')

        # Evaluate on the validation batch and persist the weights.
        loss, acc = adadelta_model.evaluate(batch_val,  label_val, verbose=2)
        print(f"Adadelta : Loss = {loss}, Validation Accuracy = {acc}")
        adadelta_model.save_weights('adadelta_model_weights.h5')

        # Repeat the experiment with the RMSprop optimizer.
        rmsprop_model = generate_model('RMSprop')
        history = train_model(rmsprop_model, 'RMSprop')
Example #10
0
# Model input window and training hyper-parameters.
input_length = 200
batch_size = 256
# input_path = "/home/biot/projects/Audio_generator/data/"
# output_path = "/home/biot/projects/Audio_generator/generated_music/"
# NOTE(review): absolute, machine-specific paths — consider making these
# CLI arguments or environment variables.
input_path = "/root/inspiron/Audio_generator/data/"
output_path = "/root/inspiron/Audio_generator/generated_music/"
sample_length = 500000

# Pin all work to the first GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Collect the mp3 training files from the input directory.
training_files = files_for_train(_file_path=input_path, extension='mp3')
print(training_files)
#  The length of the Ludmilla mix is 156187055
#  The  length  of  the  Maldova  is  17493167

model = generate_model(input_length)

#model = load_model('music_gen_model_4.h5')

#model = multi_gpu_model(model, gpus=[0, 1, 2, 3])
model.compile(loss='mean_squared_error', optimizer='rmsprop')

# Stream batches from disk; steps_per_epoch fixes the epoch length.
model.fit_generator(
    generator=generate_train_file(training_files, batch_size, input_length),
    steps_per_epoch=68000,  # 610000, 68000
    epochs=7)

model.save('music_gen_model_7.h5')

# Generate an audio sample with the freshly trained model.
make_sample(model=model,
            input_path=input_path,
Example #11
0
def train_model(inputs_dir='inputs_training',
                learning_rate=1e-4,
                n_epochs=300,
                crop_size=224,
                resize=256,
                mean=[0.5, 0.5, 0.5],
                std=[0.1, 0.1, 0.1],
                num_classes=2,
                architecture='resnet50',
                batch_size=32,
                predict=False,
                model_save_loc='saved_model.pkl',
                predictions_save_path='predictions.pkl',
                predict_set='test',
                verbose=False,
                class_balance=True,
                extract_embeddings="",
                extract_embeddings_df="",
                embedding_out_dir="./",
                gpu_id=0,
                checkpoints_dir="checkpoints",
                tensor_dataset=False):
    """Train an image classifier, run prediction, or extract embeddings.

    Modes (selected by flags):
      * train (default, ``predict=False``): fit on the 'train' split,
        validate on 'val', and save the final weights to
        ``model_save_loc``.
      * predict (``predict=True``): load weights from ``model_save_loc``
        and save predictions for ``predict_set`` to
        ``predictions_save_path``.
      * embed (``predict=True`` plus ``extract_embeddings`` and
        ``extract_embeddings_df``): strip the classifier head, write
        patch embeddings to ``embedding_out_dir``, then exit().

    NOTE(review): ``mean``/``std`` are mutable list defaults — harmless
    only if the transform builders never mutate them; confirm.
    """
    if extract_embeddings:
        assert predict, "Must be in prediction mode to extract embeddings"
    torch.cuda.set_device(gpu_id)
    # Image-folder pipelines use standard transforms; pre-tensorized
    # datasets use the kornia-based ones.
    transformers = generate_transformers if not tensor_dataset else generate_kornia_transforms
    transformers = transformers(image_size=crop_size,
                                resize=resize,
                                mean=mean,
                                std=std)
    if not extract_embeddings:
        if tensor_dataset:
            # Pre-saved tensor datasets: only train/val exist on disk.
            datasets = {
                x: torch.load(os.path.join(inputs_dir, f"{x}_data.pth"))
                for x in ['train', 'val']
            }
        else:
            datasets = {
                x: Datasets.ImageFolder(os.path.join(inputs_dir, x),
                                        transformers[x])
                for x in ['train', 'val', 'test']
            }

        # Only the training split is shuffled.
        dataloaders = {
            x: DataLoader(datasets[x],
                          batch_size=batch_size,
                          shuffle=(x == 'train'))
            for x in datasets
        }

    model = generate_model(architecture, num_classes)

    if torch.cuda.is_available():
        model = model.cuda()

    optimizer_opts = dict(name='adam', lr=learning_rate, weight_decay=1e-4)

    # Warm-restart schedule; period presumably doubles each restart
    # (T_mult=2) — semantics defined by ModelTrainer.
    scheduler_opts = dict(scheduler='warm_restarts',
                          lr_scheduler_decay=0.5,
                          T_max=10,
                          eta_min=5e-8,
                          T_mult=2)

    # No validation loader is supplied in predict mode.
    trainer = ModelTrainer(model,
                           n_epochs,
                           None if predict else dataloaders['val'],
                           optimizer_opts,
                           scheduler_opts,
                           loss_fn='ce',
                           checkpoints_dir=checkpoints_dir,
                           tensor_dataset=tensor_dataset,
                           transforms=transformers)

    if not predict:

        # Reweight the cross-entropy loss by class frequency.
        if class_balance:
            trainer.add_class_balance_loss(
                datasets['train'].targets if not tensor_dataset else
                datasets['train'].tensors[1].numpy())

        trainer, min_val_loss, best_epoch = trainer.fit(dataloaders['train'],
                                                        verbose=verbose)

        torch.save(trainer.model.state_dict(), model_save_loc)

    else:
        assert not tensor_dataset, "Only ImageFolder and NPYDatasets allowed"

        trainer.model.load_state_dict(torch.load(model_save_loc))

        if extract_embeddings and extract_embeddings_df:
            # Drop the classification head: keep conv features + flatten.
            trainer.model = nn.Sequential(trainer.model.features, Reshape())
            patch_info = load_sql_df(extract_embeddings_df, resize)
            dataset = NPYDataset(patch_info, extract_embeddings,
                                 transformers["test"])
            dataset.embed(trainer.model, batch_size, embedding_out_dir)
            exit()
        # NOTE(review): if extract_embeddings is set but
        # extract_embeddings_df is empty, `dataloaders` below was never
        # built (it is only created when extract_embeddings is falsy),
        # so this path raises NameError — confirm and guard upstream.

        Y = dict()

        Y['pred'], Y['true'] = trainer.predict(dataloaders[predict_set])

        # Y['true'] = datasets[predict_set].targets

        torch.save(Y, predictions_save_path)