Example #1
0
    def init_screen(self):
        """Create the game window and initialize scene objects.

        Sets up pygame with an OpenGL double-buffered display, enables
        depth testing and back-face culling, loads the ship model, spawns
        the initial asteroid field, builds the ship's collision sphere and
        initializes the skybox.
        """
        # NOTE: the docstring used to appear *after* this call, so Python
        # treated it as a plain no-op string rather than a docstring.
        self.init_properties()
        pygame.init()
        size = [self.win_w, self.win_h]
        pygame.display.set_mode(size, pygame.OPENGL | pygame.DOUBLEBUF)
        pygame.display.set_caption("Skywalker")
        pygame.mouse.set_visible(False)
        # Standard 3D raster state: depth test + back-face culling.
        gl.glEnable(gl.GL_DEPTH_TEST)
        gl.glEnable(gl.GL_CULL_FACE)
        self.light.enable()

        # Load the ship model. The chdir dance suggests Model resolves its
        # texture/material paths relative to the CWD — TODO confirm.
        os.chdir('./materials/spaceship/')
        self.ship = Model("spaceship.obj", 0.4, [0, 0, 0], -270, 0, -180)
        os.chdir('../../')

        # Alternative ship models (kept for reference):
        # os.chdir('./materials/Starship/')
        # self.ship = Model("Starship.obj", 0.01, [0, 0, 0], 90, 0, 180)
        # os.chdir('../../')

        # os.chdir('./materials/NCC-1701/')
        # self.ship = Model("NCC-1701_modified.obj", 1.2, [0, 0, 0], 90, 0, 180)
        # os.chdir('../../')

        # os.chdir('./materials/millenium-falcon/')
        # self.ship = Model("millenium-falcon_modified.obj", 1, [0, 0, 0], 90, 0, 0, using_left=True)
        # os.chdir('../../')

        # Pre-populate the asteroid field.
        for i in range(MAX_DISPLAY_AST):
            self.add_ast(isInit=True)
        self.ship_collider = Sphere(self.ship.radius, [0.0, 0.0, 0.0],
                                    [1, 1, 1], False)
        self.skybox.init_sky()
    def test_Moore(self):
        """Moore neighbourhood test."""
        initial = LifeMap((8, 8))
        after_one = LifeMap((8, 8))
        after_two = LifeMap((8, 8))
        # A vertical triple flips to horizontal and back (blinker-like).
        for col, row in ((2, 3), (2, 4), (2, 5)):
            initial.setCell(col, row, 1)
            after_two.setCell(col, row, 1)
        for col, row in ((1, 4), (2, 4), (3, 4)):
            after_one.setCell(col, row, 1)

        rules = RulesNearCells(2, 0, True, {
            (0, (5, 3)): 1,
            (1, (5, 3)): 1,
            (1, (6, 2)): 1,
        })
        model = Model(initial, rules)
        # Each step must reproduce the expected generation exactly.
        for expected in (after_one, after_two):
            model.makeStep()
            self.assertEqual(model.getLifeMap().getCellMatrix(),
                             expected.getCellMatrix())
    def test_Margolis(self):
        """Margolis neighbourhood test."""
        initial = LifeMap((8, 8))
        after_one = LifeMap((8, 8))
        after_two = LifeMap((8, 8))
        initial.setCell(0, 0, 1)
        initial.setCell(3, 1, 2)
        after_one.setCell(0, 0, 2)
        after_one.setCell(3, 1, 1)
        after_two.setCell(0, 0, 1)
        after_two.setCell(3, 1, 1)

        # State-swap rules per 2x2 block: 1 <-> 2 in each corner position.
        swap_rules = {
            (1, 0, 0, 0): (2, 0, 0, 0),
            (0, 2, 0, 0): (0, 1, 0, 0),
            (0, 0, 1, 0): (0, 0, 2, 0),
            (0, 0, 0, 2): (0, 0, 0, 1),
        }
        model = Model(initial, RulesSquares(None, swap_rules))
        for expected in (after_one, after_two):
            model.makeStep()
            self.assertEqual(model.getLifeMap().getCellMatrix(),
                             expected.getCellMatrix())
Example #4
0
def train(modelname: str):
    """Train the model on the full e-mail dataset and persist it under *modelname*."""
    dataset = Dataset()
    model = Model()
    model.train(dataset.get_data())
    model.serialize(modelname)
    return {"Hello": "World"}
Example #5
0
    def __init__(self,
                 df,
                 column_type,
                 embedding_dim=5,
                 n_layers=5,
                 dim_feedforward=100,
                 n_head=5,
                 dropout=0.15,
                 ns_exponent=0.75,
                 share_category=False,
                 use_pos=False,
                 device='cpu'):
        """Build vocabulary and transformer model for the tabular data.

        `column_type` lists one of 'numerical'/'categorical'/'vector' per
        column of *df*; columns are bucketed by type before model creation.
        """
        self.logger = create_logger(name="BERTable")

        # Group column indices by their declared data type.
        self.col_type = {'numerical': [], 'categorical': [], 'vector': []}
        for col_idx, col_kind in enumerate(column_type):
            self.col_type[col_kind].append(col_idx)

        self.embedding_dim = embedding_dim
        self.use_pos = use_pos
        self.device = device

        self.vocab = Vocab(df, self.col_type, share_category, ns_exponent)

        vocab_size = {
            kind: len(self.vocab.item2idx[kind])
            for kind in ('numerical', 'categorical')
        }

        # Width of each raw vector column — assumes df[col] is 2-D; confirm.
        vector_dims = [np.shape(df[col])[1] for col in self.col_type['vector']]
        self.model = Model(vocab_size, self.col_type, use_pos, vector_dims,
                           embedding_dim, dim_feedforward, len(column_type),
                           n_layers, n_head, dropout)
Example #6
0
def main():
    """Transformer-TTS training loop with gradient accumulation.

    NOTE(review): relies on module-level `hparams` and helpers
    (`prepare_dataloaders`, `reorder_batch`, `lr_scheduling`, `validate`,
    `save_checkpoint`, `get_writer`) — confirm they are imported.
    """
    train_loader, val_loader, collate_fn = prepare_dataloaders(hparams)
    model = nn.DataParallel(Model(hparams)).cuda()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=hparams.lr,
                                 betas=(0.9, 0.98),
                                 eps=1e-09)
    criterion = TransformerLoss()
    writer = get_writer(hparams.output_directory, hparams.log_directory)

    iteration, loss = 0, 0
    model.train()
    print("Training Start!!!")
    # `iteration` counts micro-batches; one optimizer step happens every
    # `hparams.accumulation` micro-batches.
    while iteration < (hparams.train_steps*hparams.accumulation):
        for i, batch in enumerate(train_loader):
            # Re-split the batch across GPUs, then move tensors to device.
            text_padded, text_lengths, mel_padded, mel_lengths, gate_padded = [
                reorder_batch(x, hparams.n_gpus).cuda() for x in batch
            ]

            mel_loss, bce_loss, guide_loss = model(text_padded,
                                                   mel_padded,
                                                   gate_padded,
                                                   text_lengths,
                                                   mel_lengths,
                                                   criterion)

            # Average the per-replica losses returned by DataParallel.
            mel_loss, bce_loss, guide_loss=[
                torch.mean(x) for x in [mel_loss, bce_loss, guide_loss]
            ]
            # Scale so accumulated gradients match one full-batch step.
            sub_loss = (mel_loss+bce_loss+guide_loss)/hparams.accumulation
            sub_loss.backward()
            loss = loss+sub_loss.item()

            iteration += 1
            if iteration%hparams.accumulation == 0:
                # One effective optimizer step.
                lr_scheduling(optimizer, iteration//hparams.accumulation)
                nn.utils.clip_grad_norm_(model.parameters(), hparams.grad_clip_thresh)
                optimizer.step()
                model.zero_grad()
                writer.add_losses(mel_loss.item(),
                                  bce_loss.item(),
                                  guide_loss.item(),
                                  iteration//hparams.accumulation, 'Train')
                loss=0


            if iteration%(hparams.iters_per_validation*hparams.accumulation)==0:
                validate(model, criterion, val_loader, iteration, writer)

            if iteration%(hparams.iters_per_checkpoint*hparams.accumulation)==0:
                save_checkpoint(model,
                                optimizer,
                                hparams.lr,
                                iteration//hparams.accumulation,
                                filepath=f'{hparams.output_directory}/{hparams.log_directory}')

            if iteration==(hparams.train_steps*hparams.accumulation):
                break
Example #7
0
def train():
    """Create the classifier, train it, and save it to disk."""
    # Build the model
    print('(TRAINER) Creating model...')
    model = Model()

    # Fit the classifier
    print('(TRAINER) Training model...')
    model.train()

    # Persist the trained classifier
    print('(TRAINER) Saving model...')
    model.save()

    return model
Example #8
0
def predict(examples):
    """Load the persisted classifier and return its prediction for *examples*."""
    # Build the model shell
    print('(CLASSIFIER) Creating model...')
    model = Model()

    # Restore the trained classifier from disk
    print('(CLASSIFIER) Loading model...')
    model.load()

    # Compute and report the prediction
    prediction = model.predict(examples)
    print(f'(CLASSIFIER) Prediction obtained ({prediction})')

    return prediction
Example #9
0
    def __init__(self, model_name, corpus_dataset):
        """Wire up config, data loader, model, optimizer and logging for training."""
        self._config = TrainConfig()
        self._model_name = model_name
        self._vocabulary = corpus_dataset.vocabulary
        self._data_loader = corpus_dataset.get_data_loader(
            self._config.batch_size)
        self._model = Model(
            vocabulary=corpus_dataset.vocabulary, training=True)
        # TODO: Support for other optimizers
        self._optimizer = optim.Adam(
            self._model.parameters(), lr=self._config.learning_rate)
        self._global_step = -1

        logging.basicConfig(level=logging.INFO)
        self._train_logger = logging.getLogger('Train')
Example #10
0
    def add_ast(self, isInit=False):
        """Adding asteroids to a random pos near the ship.

        With isInit=True (initial field population) the Y offset uses the
        wider AST_Y_*_INIT bounds; afterwards asteroids spawn ahead using
        AST_Y_MIN/AST_Y_MAX. The deque is capped at MAX_DISPLAY_AST.

        NOTE(review): the order of random.randint calls matters for seeded
        reproducibility — do not reorder.
        """
        size = random.randint(AST_MIN_SIZE, AST_MAX_SIZE)
        # X and Z are sampled symmetrically around the ship's position.
        pos_x = random.randint(self.ship.pos[0] - AST_RANGE,
                               self.ship.pos[0] + AST_RANGE)
        pos_y = random.randint(self.ship.pos[1]+AST_Y_MIN_INIT, self.ship.pos[1]+AST_Y_MAX_INIT) if isInit \
            else random.randint(self.ship.pos[1]+AST_Y_MIN, self.ship.pos[1]+AST_Y_MAX)
        pos_z = random.randint(self.ship.pos[2] - AST_RANGE,
                               self.ship.pos[2] + AST_RANGE)

        # Random orientation, drift velocity and rotation speed per asteroid.
        self.asteroids.append(
            Model("materials/ast_lowpoly2/ast_lowpoly2.obj", size,
                  [pos_x, pos_y, pos_z], random.randint(0, 360),
                  random.randint(0, 360), random.randint(0, 360), False, [
                      random.randint(-AST_MOVE_RANGE, AST_MOVE_RANGE),
                      random.randint(-AST_MOVE_RANGE, AST_MOVE_RANGE),
                      random.randint(-AST_MOVE_RANGE, AST_MOVE_RANGE)
                  ], random.randint(-AST_ROT_RANGE, AST_ROT_RANGE)))
        # Evict the oldest asteroid once the display cap is exceeded.
        if len(self.asteroids) > MAX_DISPLAY_AST:
            self.asteroids.popleft()
Example #11
0
def main(args):
    """TTS training loop with apex AMP mixed precision (no accumulation).

    NOTE(review): relies on module-level `hp` hyperparameters and `amp`
    (NVIDIA apex) — confirm imports.
    """
    train_loader, val_loader, collate_fn = prepare_dataloaders(hp)
    model = Model(hp).cuda()
    optimizer = torch.optim.Adamax(model.parameters(), lr=hp.lr)
    writer = get_writer(hp.output_directory, args.logdir)
    # O1: conservative mixed-precision patching of model + optimizer.
    model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

    iteration = 0
    model.train()
    print(f"Training Start!!! ({args.logdir})")
    while iteration < (hp.train_steps):
        for i, batch in enumerate(train_loader):
            text_padded, text_lengths, mel_padded, mel_lengths = [ x.cuda() for x in batch ]
            recon_loss, kl_loss, duration_loss, align_loss = model(text_padded, mel_padded, text_lengths, mel_lengths)

            # KL warm-up: ramp the KL weight linearly to 1 over kl_warmup_steps.
            alpha=min(1, iteration/hp.kl_warmup_steps)
            with amp.scale_loss((recon_loss + alpha*kl_loss + duration_loss + align_loss), optimizer) as scaled_loss:
                scaled_loss.backward()

            # One optimizer step per batch (no gradient accumulation here).
            iteration += 1
            lr_scheduling(optimizer, iteration)
            nn.utils.clip_grad_norm_(model.parameters(), hp.grad_clip_thresh)
            optimizer.step()
            model.zero_grad()
            writer.add_scalar('train_recon_loss', recon_loss, global_step=iteration)
            writer.add_scalar('train_kl_loss', kl_loss, global_step=iteration)
            writer.add_scalar('train_duration_loss', duration_loss, global_step=iteration)
            writer.add_scalar('train_align_loss', align_loss, global_step=iteration)

            if iteration % (hp.iters_per_validation) == 0:
                validate(model, val_loader, iteration, writer)

            if iteration % (hp.iters_per_checkpoint) == 0:
                save_checkpoint(model, optimizer, hp.lr, iteration, filepath=f'{hp.output_directory}/{args.logdir}')

            if iteration == (hp.train_steps):
                break
def main():
    """Extract per-token alignment durations with a stage-0 AlignTTS model.

    Loads the stage-0 checkpoint, runs the MDN + Viterbi alignment over all
    metadata entries in batches of 64, and saves one `*_alignment.npy`
    duration vector per utterance.
    """
    # Local import keeps the fixed timestamp message below self-contained;
    # harmless if the module already imports datetime at top level.
    from datetime import datetime

    data_type = 'phone'
    checkpoint_path = f"training_log/aligntts/stage0/checkpoint_{hparams.train_steps[0]}"
    state_dict = {}

    # Strip the 'module.' prefix (k[7:]) left by nn.DataParallel on save.
    for k, v in torch.load(checkpoint_path)['state_dict'].items():
        state_dict[k[7:]] = v

    model = Model(hparams).cuda()
    model.load_state_dict(state_dict)
    _ = model.cuda().eval()
    criterion = MDNLoss()

    #datasets = ['train', 'val', 'test']
    datasets = ['train']
    batch_size = 64

    for dataset in datasets:
        #with open(f'filelists/ljs_audio_text_{dataset}_filelist.txt', 'r') as f:
        with open(f'/hd0/speech-aligner/metadata/metadata.csv', 'r') as f:
            lines_raw = [line.split('|') for line in f.read().splitlines()]
            # Chunk the metadata into batches of `batch_size` lines.
            lines_list = [
                lines_raw[batch_size * i:batch_size * (i + 1)]
                for i in range(len(lines_raw) // batch_size + 1)
            ]

        for batch in tqdm(lines_list):
            file_list, text_list, mel_list = [], [], []
            text_lengths, mel_lengths = [], []

            # Load each utterance's token sequence and mel-spectrogram.
            for i in range(len(batch)):
                # Assumes metadata rows are `file|<speaker?>|text` — confirm.
                file_name, _, text = batch[i]
                file_name = os.path.splitext(file_name)[0]
                file_list.append(file_name)
                seq = os.path.join(
                    '/hd0/speech-aligner/preprocessed/VCTK20_engspks',
                    f'{data_type}_seq')
                mel = os.path.join(
                    '/hd0/speech-aligner/preprocessed/VCTK20_engspks',
                    'melspectrogram')

                seq = torch.from_numpy(
                    np.load(f'{seq}/{file_name}_sequence.npy'))
                mel = torch.from_numpy(
                    np.load(f'{mel}/{file_name}_melspectrogram.npy'))

                text_list.append(seq)
                mel_list.append(mel)
                text_lengths.append(seq.size(0))
                mel_lengths.append(mel.size(1))

            # Zero-pad the batch to the longest text / mel length.
            text_lengths = torch.LongTensor(text_lengths)
            mel_lengths = torch.LongTensor(mel_lengths)
            text_padded = torch.zeros(len(batch),
                                      text_lengths.max().item(),
                                      dtype=torch.long)
            mel_padded = torch.zeros(len(batch), hparams.n_mel_channels,
                                     mel_lengths.max().item())

            for j in range(len(batch)):
                text_padded[j, :text_list[j].size(0)] = text_list[j]
                mel_padded[j, :, :mel_list[j].size(1)] = mel_list[j]

            text_padded = text_padded.cuda()
            mel_padded = mel_padded.cuda()
            # Normalize mels from [min_db, max_db] into [0, 1].
            mel_padded = (
                torch.clamp(mel_padded, hparams.min_db, hparams.max_db) -
                hparams.min_db) / (hparams.max_db - hparams.min_db)
            text_lengths = text_lengths.cuda()
            mel_lengths = mel_lengths.cuda()

            with torch.no_grad():
                encoder_input = model.Prenet(text_padded)
                hidden_states, _ = model.FFT_lower(encoder_input, text_lengths)
                mu_sigma = model.get_mu_sigma(hidden_states)
                _, log_prob_matrix = criterion(mu_sigma, mel_padded,
                                               text_lengths, mel_lengths)

                # Hard alignment path via Viterbi decoding.
                align = model.viterbi(log_prob_matrix, text_lengths,
                                      mel_lengths).to(torch.long)
                alignments = list(torch.split(align, 1))

            for j, (l, t) in enumerate(zip(text_lengths, mel_lengths)):
                # Collapse the (text x mel) alignment matrix into per-token
                # durations by summing over mel frames.
                alignments[j] = alignments[j][0, :l.item(), :t.item()].sum(
                    dim=-1)
                os.makedirs(
                    "/hd0/speech-aligner/preprocessed/VCTK20_engspks/alignments/{}"
                    .format(file_list[j].split('/')[0]),
                    exist_ok=True)
                np.save(
                    f'/hd0/speech-aligner/preprocessed/VCTK20_engspks/alignments/{file_list[j]}_alignment.npy',
                    alignments[j].detach().cpu().numpy())
                # plt.imshow(align[j].detach().cpu().numpy())
                # plt.gca().invert_yaxis()
                # plt.savefig(f"/hd0/speech-aligner/preprocessed/VCTK20_engspks/alignments/{file_list[j]}_alignment.png", format='png')

    # FIX: was a plain string missing the f-prefix, so the timestamp
    # placeholder was printed literally.
    print(f"Alignments Extraction End!!! ({datetime.now()})")
Example #13
0
def main(args):
    """AlignTTS stage-wise training loop (MDN alignment loss).

    Stage 0 trains from scratch without alignments; later stages resume
    from the previous stage's final checkpoint and consume precomputed
    alignments from each batch.
    """
    train_loader, val_loader, collate_fn = prepare_dataloaders(hparams, stage=args.stage)

    if args.stage!=0:
        # Resume from the previous stage's last checkpoint; strip the
        # 'module.' prefix (k[7:]) added by nn.DataParallel when saving.
        checkpoint_path = f"training_log/aligntts/stage{args.stage-1}/checkpoint_{hparams.train_steps[args.stage-1]}"
        state_dict = {}
        for k, v in torch.load(checkpoint_path)['state_dict'].items():
            state_dict[k[7:]]=v

        model = Model(hparams).cuda()
        model.load_state_dict(state_dict)
        model = nn.DataParallel(model).cuda()
    else:
        model = nn.DataParallel(Model(hparams)).cuda()

    criterion = MDNLoss()
    writer = get_writer(hparams.output_directory, f'{hparams.log_directory}/stage{args.stage}')
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=hparams.lr,
                                 betas=(0.9, 0.98),
                                 eps=1e-09)
    iteration, loss = 0, 0
    model.train()

    print(f'Stage{args.stage} Start!!! ({str(datetime.now())})')
    while True:
        for i, batch in enumerate(train_loader):
            if args.stage==0:
                # Stage 0 batches carry no precomputed alignments.
                text_padded, mel_padded, text_lengths, mel_lengths = [
                    reorder_batch(x, hparams.n_gpus).cuda() for x in batch
                ]
                align_padded=None
            else:
                text_padded, mel_padded, align_padded, text_lengths, mel_lengths = [
                    reorder_batch(x, hparams.n_gpus).cuda() for x in batch
                ]

            sub_loss = model(text_padded,
                             mel_padded,
                             align_padded,
                             text_lengths,
                             mel_lengths,
                             criterion,
                             stage=args.stage)
            # Scale for gradient accumulation.
            sub_loss = sub_loss.mean()/hparams.accumulation
            sub_loss.backward()
            loss = loss+sub_loss.item()
            iteration += 1

            if iteration%hparams.accumulation == 0:
                # One effective optimizer step per `accumulation` micro-batches.
                lr_scheduling(optimizer, iteration//hparams.accumulation)
                nn.utils.clip_grad_norm_(model.parameters(), hparams.grad_clip_thresh)
                optimizer.step()
                model.zero_grad()
                writer.add_scalar('Train loss', loss, iteration//hparams.accumulation)
                loss=0

            if iteration%(hparams.iters_per_validation*hparams.accumulation)==0:
                validate(model, criterion, val_loader, iteration, writer, args.stage)

            if iteration%(hparams.iters_per_checkpoint*hparams.accumulation)==0:
                save_checkpoint(model,
                                optimizer,
                                hparams.lr,
                                iteration//hparams.accumulation,
                                filepath=f'{hparams.output_directory}/{hparams.log_directory}/stage{args.stage}')

            if iteration==(hparams.train_steps[args.stage]*hparams.accumulation):
                break

        # Propagate the inner break out of the epoch loop.
        if iteration==(hparams.train_steps[args.stage]*hparams.accumulation):
            break

    print(f'Stage{args.stage} End!!! ({str(datetime.now())})')
Example #14
0
def testing(non_adapted_model_dir, adapted_model_dir, classifier_dir,
            nb_clss_labels, feat_path, labels_path, device, src_batch_size,
            trgt_batch_size):
    """Implements the complete test process of the AUDASC method

    :param non_adapted_model_dir: directory of non adapted model
    :param adapted_model_dir: directory of adapted model
    :param classifier_dir: directory of classifier
    :param nb_clss_labels: number of acoustic scene classes
    :param feat_path: directory of test features
    :param labels_path: directory of test labels
    :param device: The device that will be used.
    :param src_batch_size: source batch size
    :param trgt_batch_size: target batch size
    """
    # Restore the three trained networks from disk.
    non_adapted_cnn = Model().to(device)
    non_adapted_cnn.load_state_dict(
        torch.load(path.join(non_adapted_model_dir,
                             'non_adapted_cnn.pytorch')))

    adapted_cnn = Model().to(device)
    adapted_cnn.load_state_dict(
        torch.load(path.join(adapted_model_dir, 'target_cnn.pytorch')))

    label_classifier = LabelClassifier(nb_clss_labels).to(device)
    label_classifier.load_state_dict(
        torch.load(path.join(classifier_dir, 'label_classifier.pytorch')))

    # Evaluation mode: disable dropout/batch-norm updates.
    non_adapted_cnn.train(False)
    adapted_cnn.train(False)
    label_classifier.train(False)

    feat = file_io.load_pickled_features(feat_path)
    labels = file_io.load_pickled_features(labels_path)

    non_adapted_acc = {}
    adapted_acc = {}

    # ********************************************
    # ** testing for all data, device A, B, & C **
    # ********************************************
    # FIX: these banner lines were bare string-literal statements (no-op
    # expressions), now real comments.

    # testing on source data
    src_batch_feat, src_batch_labels = \
        test_step.test_data_mini_batch(feat['A'].to(device), labels['A'].to(device), batch_size=src_batch_size)
    non_adapted_src_correct, adapted_src_correct, src_temp = \
        test_step.test_function(non_adapted_cnn, adapted_cnn, label_classifier, src_batch_feat, src_batch_labels)

    # Number of evaluated source samples (batches * batch size).
    non_adapted_src_len = src_temp * src_batch_size
    adapted_src_len = src_temp * src_batch_size

    # testing on target data
    target_feat = torch.cat([feat['B'], feat['C']], dim=0).to(device)
    target_labels = torch.cat([labels['B'], labels['C']], dim=0).to(device)

    trgt_batch_feat, trgt_batch_labels =\
        test_step.test_data_mini_batch(target_feat, target_labels, batch_size=trgt_batch_size)
    non_adapted_tgt_correct, adapted_tgt_correct, trgt_temp = \
        test_step.test_function(non_adapted_cnn, adapted_cnn, label_classifier, trgt_batch_feat, trgt_batch_labels)

    non_adapted_tgt_len = trgt_temp * trgt_batch_size
    adapted_tgt_len = trgt_temp * trgt_batch_size

    # calculating the accuracy of both models on data from device A
    non_adapted_acc['A'] = math_funcs.to_percentage(non_adapted_src_correct,
                                                    non_adapted_src_len)
    adapted_acc['A'] = math_funcs.to_percentage(adapted_src_correct,
                                                adapted_src_len)

    # calculating the accuracy of both models on data from devices B & C
    non_adapted_acc['BC'] = math_funcs.to_percentage(non_adapted_tgt_correct,
                                                     non_adapted_tgt_len)
    adapted_acc['BC'] = math_funcs.to_percentage(adapted_tgt_correct,
                                                 adapted_tgt_len)

    # calculating the accuracy of both models on data from all devices,
    # weighting each split by its share of evaluated samples
    non_adapted_beta, non_adapted_alpha = math_funcs.weighting_factors(
        non_adapted_src_len, non_adapted_tgt_len)
    adapted_beta, adapted_alpha = math_funcs.weighting_factors(
        adapted_src_len, adapted_tgt_len)

    non_adapted_weighted_acc = (non_adapted_beta * non_adapted_acc['A']) + (
        non_adapted_alpha * non_adapted_acc['BC'])
    adapted_weighted_acc = (adapted_beta * adapted_acc['A']) + (
        adapted_alpha * adapted_acc['BC'])

    non_adapted_acc['all'] = non_adapted_weighted_acc
    adapted_acc['all'] = adapted_weighted_acc

    printing.testing_result_msg(non_adapted_acc,
                                adapted_acc,
                                ending='\n',
                                flushing=True)
Example #15
0
def main(args):
    """AlignTTS stage-wise training loop with checkpoint fallback and
    optional warm start from a pre-trained model.

    Stage 0 trains without alignments (optionally resuming iteration count
    from `args.pre_trained_model`); later stages resume from the previous
    stage's checkpoint, falling back to the latest available one.
    """
    train_loader, val_loader, collate_fn = prepare_dataloaders(
        hparams, stage=args.stage)
    initial_iteration = None
    if args.stage != 0:
        checkpoint_path = f"training_log/aligntts/stage{args.stage-1}/checkpoint_{hparams.train_steps[args.stage-1]}"

        # Fall back to the newest checkpoint of the previous stage if the
        # expected final one is missing.
        if not os.path.isfile(checkpoint_path):
            print(f'{checkpoint_path} does not exist')
            checkpoint_path = sorted(
                glob(f"training_log/aligntts/stage{args.stage-1}/checkpoint_*")
            )[-1]
            print(f'Loading {checkpoint_path} instead')

        # Strip the 'module.' prefix (k[7:]) added by nn.DataParallel.
        state_dict = {}
        for k, v in torch.load(checkpoint_path)['state_dict'].items():
            state_dict[k[7:]] = v

        model = Model(hparams).cuda()
        model.load_state_dict(state_dict)
        model = nn.DataParallel(model).cuda()
    else:
        if args.pre_trained_model != '':
            if not os.path.isfile(args.pre_trained_model):
                print(f'{args.pre_trained_model} does not exist')

            state_dict = {}
            for k, v in torch.load(
                    args.pre_trained_model)['state_dict'].items():
                state_dict[k[7:]] = v
            # Resume the iteration counter from the warm-start checkpoint.
            initial_iteration = torch.load(args.pre_trained_model)['iteration']
            model = Model(hparams).cuda()
            model.load_state_dict(state_dict)
            model = nn.DataParallel(model).cuda()
        else:

            model = nn.DataParallel(Model(hparams)).cuda()

    criterion = MDNLoss()
    writer = get_writer(hparams.output_directory,
                        f'{hparams.log_directory}/stage{args.stage}')
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=hparams.lr,
                                 betas=(0.9, 0.98),
                                 eps=1e-09)
    iteration, loss = 0, 0
    if initial_iteration is not None:
        iteration = initial_iteration
    model.train()

    print(f'Stage{args.stage} Start!!! ({str(datetime.now())})')
    while True:
        for i, batch in enumerate(train_loader):
            if args.stage == 0:
                # Stage 0 batches carry no precomputed alignments.
                text_padded, mel_padded, text_lengths, mel_lengths = [
                    reorder_batch(x, hparams.n_gpus).cuda() for x in batch
                ]
                align_padded = None
            else:
                text_padded, mel_padded, align_padded, text_lengths, mel_lengths = [
                    reorder_batch(x, hparams.n_gpus).cuda() for x in batch
                ]

            sub_loss = model(text_padded,
                             mel_padded,
                             align_padded,
                             text_lengths,
                             mel_lengths,
                             criterion,
                             stage=args.stage,
                             log_viterbi=args.log_viterbi,
                             cpu_viterbi=args.cpu_viterbi)
            # Scale for gradient accumulation.
            sub_loss = sub_loss.mean() / hparams.accumulation
            sub_loss.backward()
            loss = loss + sub_loss.item()
            iteration += 1
            if iteration % 100 == 0:
                print(
                    f'[{str(datetime.now())}] Stage {args.stage} Iter {iteration:<6d} Loss {loss:<8.6f}'
                )

            if iteration % hparams.accumulation == 0:
                # One effective optimizer step per `accumulation` micro-batches.
                lr_scheduling(optimizer, iteration // hparams.accumulation)
                nn.utils.clip_grad_norm_(model.parameters(),
                                         hparams.grad_clip_thresh)
                optimizer.step()
                model.zero_grad()
                writer.add_scalar('Train loss', loss,
                                  iteration // hparams.accumulation)
                writer.add_scalar('Learning rate', get_lr(optimizer),
                                  iteration // hparams.accumulation)
                loss = 0

            if iteration % (hparams.iters_per_validation *
                            hparams.accumulation) == 0:
                validate(model, criterion, val_loader, iteration, writer,
                         args.stage)

            if iteration % (hparams.iters_per_checkpoint *
                            hparams.accumulation) == 0:
                save_checkpoint(
                    model,
                    optimizer,
                    hparams.lr,
                    iteration // hparams.accumulation,
                    filepath=
                    f'{hparams.output_directory}/{hparams.log_directory}/stage{args.stage}'
                )

            if iteration == (hparams.train_steps[args.stage] *
                             hparams.accumulation):
                break

        # Propagate the inner break out of the epoch loop.
        if iteration == (hparams.train_steps[args.stage] *
                         hparams.accumulation):
            break

    print(f'Stage{args.stage} End!!! ({str(datetime.now())})')
Example #16
0
def analyze(company):
    """Train the requested number of models and redirect to the results page.

    Trains and predicts with a model as many times as the user asked for,
    averages the results, stores them in the session, then redirects.

    Parameters:
        company(str): stock symbol of the stock that the model will analyze
    """

    # Clamp the submitted model count to [1, 3]; anything that is not a
    # positive integer in range becomes 1.
    try:
        count = int(request.form['count'])
    except ValueError:
        count = 1
    else:
        if count <= 0 or count > 3:
            count = 1

    # Load the locally cached data, then clean up the filesystem.
    X_pred = pd.read_csv('prediction_data.csv')
    x = pd.read_csv('x.csv')
    y = pd.read_csv('y.csv')
    for temp_file in ('prediction_data.csv', 'x.csv', 'y.csv'):
        os.remove(temp_file)

    predictions = []      # final prediction of each model
    errors = []           # final RMSE of each model
    prediction_json = []  # per-epoch prediction history of each model

    for model_idx in range(count):
        reg = Model(len(x.columns))

        prediction_history, rmse = reg.train(x, y, X_pred)

        prediction_history.insert(0, 'Model ' + str(model_idx + 1))
        prediction_json.append(prediction_history)

        predictions.append(reg.predict(X_pred))
        errors.append(rmse)

    # Average across models to get the "true" prediction/error.
    true_prediction = sum(predictions) / len(predictions)
    true_error = sum(errors) / len(errors)

    # Stash results for the follow-up results route.
    session['predictions'] = prediction_json
    session['true_prediction'] = true_prediction
    session['true_error'] = true_error

    print('')
    print('********************')
    print('TRUE PREDICTION: ' + str(true_prediction))
    print('********************')
    print('')
    print('True Error: ' + str(true_error))
    print('')

    return redirect('/' + company + '/' + str(count) + '/results')
Example #17
0
def _print_metrics(accuracy, precision, recall, f1_score):
    """Print the four evaluation metrics in the evaluator's standard format."""
    print("-> F1 Score - ", "{0:.2f}".format(f1_score))
    print("-> Precision - ", "{0:.2f}".format(precision))
    print("-> Recall - ", "{0:.2f}".format(recall))
    print("-> Accuracy - ", "{0:.2f}".format(accuracy))


def evaluate(grid_search=False):
    """Train and evaluate every model type in const.MODELS.

    When grid_search is True, each model type is additionally evaluated over
    its whole parameter space and only its best parameter set (ranked by
    f1_score) is kept.  The overall best model is then re-trained and saved.

    Parameters:
        grid_search (bool): whether to search each model's parameter space.
    """

    # (model, params, accuracy, precision, recall, f1_score) tuples
    results_list = []

    # Iterate over the configured model types
    for model_type in const.MODELS:

        print()
        print('(EVALUATOR) Evaluating model ' + model_type)

        if grid_search:

            # (model, params, accuracy, precision, recall, f1_score) tuples
            grid_search_list = []
            param_space = get_parameter_space(model_type)

            for params in param_space:

                # 1. Build the model for this parameter set
                model = Model(model=model_type, params={'model': model_type, 'params': params})

                # 2. Train the classifier
                model.train()

                # 3. Evaluate the classifier
                accuracy, results, _, _ = model.evaluate()
                grid_search_list.append((model_type, params, accuracy, results['precision'], results['recall'], results['f1_score']))

            # Sort results by f1_score, best first
            grid_search_list = sorted(grid_search_list, key=lambda x: x[5], reverse=True)

            print()
            print('(EVALUATOR) Grid search results -> Model - ', model_type)
            for _, params, accuracy, precision, recall, f1_score in grid_search_list:
                print()
                print("Params - ", params)
                _print_metrics(accuracy, precision, recall, f1_score)
            print()

            # Keep only the best parameter set for this model type
            (_, best_params, best_accuracy, best_precision, best_recall,
             best_f1_score) = grid_search_list[0]
            results_list.append((model_type, best_params, best_accuracy, best_precision, best_recall, best_f1_score))

        else:

            # 1. Build the model with its default parameters
            model = Model(model=model_type)

            # 2. Train the classifier
            model.train()

            # 3. Evaluate the classifier
            accuracy, results, _, _ = model.evaluate()
            results_list.append((model_type, None, accuracy, results['precision'], results['recall'], results['f1_score']))

    # Sort results by f1_score, best first
    results_list = sorted(results_list, key=lambda x: x[5], reverse=True)

    # Report every model's metrics
    print()
    print('(EVALUATOR) Sorted results: ')
    for model, params, accuracy, precision, recall, f1_score in results_list:
        print()
        print("Model - ", model)
        if params is not None:
            print("Params - ", params)
        _print_metrics(accuracy, precision, recall, f1_score)
    print()

    best_solution = {
        'model': results_list[0][0],
        'params': results_list[0][1]
    }
    # NOTE(review): when the winner comes from the non-grid-search branch,
    # best_solution['params'] is None — confirm Model treats that the same
    # as the params-less constructor.

    # Re-train the best model on all data and persist it
    model = Model(model=results_list[0][0], params=best_solution)
    model.train()
    model.save()

    print('(EVALUATOR) Trained and saved best model')
Example #18
0
from modules.feature_selectors import ExampleFeatureSelector

# Training configuration — the data paths are placeholders to be filled in.
device = "cpu"
raw_data_path = None
processed_data_path = None
n_epochs = 10  # NOTE(review): not consumed by training_epoch below — confirm intended use
feature_selector = ExampleFeatureSelector()
train_data = BioactivityData(raw_data_path, processed_data_path,
                             feature_selector)
train_loader = DataLoader(train_data, batch_size=16, shuffle=False)

# Validation split is not wired up yet.
valid_data = None
valid_loader = None

# Infer the feature-vector length from the first sample's feature tensor.
input_size = train_data[0][0].shape[0]
model = Model(input_size, dim=200, n_res_blocks=2).to(device)

optimizer = torch.optim.Adam(model.parameters())
loss_fn = torch.nn.BCELoss()  # assumes the model outputs probabilities in [0, 1] — confirm


def training_epoch(loader, model, opt, loss_fn):
    """Run one optimization pass over every batch in *loader*.

    Parameters:
        loader: iterable yielding (x, y) batch pairs (e.g. a DataLoader).
        model: network being trained; invoked as model(x).
        opt: optimizer over the model's parameters.
        loss_fn: criterion comparing model(x) against y.

    Uses the module-level `device` to place each batch.
    """
    # BUG FIX: the loader yields (x, y) pairs directly, so the previous
    # "for iter, (x, y) in loader" unpacking was wrong (it also shadowed
    # the builtin iter); use enumerate(loader) if an index is ever needed.
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        pred = model(x)

        loss = loss_fn(pred, y)

        opt.zero_grad()
        loss.backward()
        opt.step()
Example #19
0
def initModel():
    """Init model instance.

    Returns:
        Model: a model over a 50x50 LifeMap driven by a 2-state
        RulesNearCells manager with an empty rule set.
    """
    # Named life_map (not "map") so the builtin map() is not shadowed.
    life_map = LifeMap((50, 50))
    manager = RulesNearCells(2, None, True, {})
    return Model(life_map, manager)
Example #20
0
def run_model(config):
    """Build, train, and return a Model for the given config."""
    trained = Model(config)
    trained.create_model()
    trained.train_model()
    return trained
Example #21
0
def collect_model(spider, brand):
    """Spawn one generation-collector thread per model URL of *brand*."""
    for url in brand.model_urls:
        candidate = Model(brand.relative_brand_url, url)
        worker = Thread(target=collect_generation,
                        args=(spider, brand, candidate))
        worker.start()
Example #22
0
    if debug_encoded:
        debug.test_encoded_net(0)
    
    if debug_encrypted:
        debug.test_encrypted_net(1)
        
else:

    ds = Dataset(verbosity = verbosity)
    (train, train_labels), (test, test_labels) = ds.load(2)
    
    exp = Exporter(verbosity = verbosity)
    # exp.exportBestOf(train, train_labels, test, test_labels, params, model_name="model15", num_test=10)
    
    model = exp.load(model_name='model15')
    
    test = test[:coeff_mod]
    test_labels = test_labels[:coeff_mod]
    
    cn = Cryptonet(test, test_labels, model, p_moduli, coeff_mod, precision, True)
    cn.evaluate()
    
    m = Model()
    acc = m.getAccuracy(model, test, test_labels)
    print("Original Accuracy: " + str(acc) + "%")





Example #23
0
def training_process(device, nb_class_labels, model_path, result_dir, patience,
                     epochs, do_pre_train, tr_feat_path, tr_labels_path,
                     val_feat_path, val_labels_path, tr_batch_size,
                     val_batch_size, adapt_patience, adapt_epochs, d_lr,
                     tgt_lr, update_cnt, factor):
    """Implements the complete training process of the AUDASC method.

    :param device: The device that we will use.
    :type device: str
    :param nb_class_labels: The amount of labels for label classification.
    :type nb_class_labels: int
    :param model_path: The path of previously saved model (if any)
    :type model_path: str
    :param result_dir: The directory to save newly pre-trained model.
    :type result_dir: str
    :param patience: The patience for the pre-training step.
    :type patience: int
    :param epochs: The epochs for the pre-training step.
    :type epochs: int
    :param do_pre_train: Flag to indicate if we do pre-training.
    :type do_pre_train: bool
    :param tr_feat_path: The path for loading the training features.
    :type tr_feat_path: str
    :param tr_labels_path: The path for loading the training labels.
    :type tr_labels_path: str
    :param val_feat_path: The path for loading the validation features.
    :type val_feat_path: str
    :param val_labels_path: The path for loading the validation labels.
    :type val_labels_path: str
    :param tr_batch_size: The batch used for pre-training.
    :type tr_batch_size: int
    :param val_batch_size: The batch size used for validation.
    :type val_batch_size: int
    :param adapt_patience: The patience for the domain adaptation step.
    :type adapt_patience: int
    :param adapt_epochs: The epochs for the domain adaptation step.
    :type adapt_epochs: int
    :param d_lr: The learning rate for the discriminator.
    :type d_lr: float
    :param tgt_lr: The learning rate for the adapted model.
    :type tgt_lr: float
    :param update_cnt: An update controller for adversarial loss
    :type update_cnt: int
    :param factor: the coefficient used to be multiplied by classification loss.
    :type factor: int
    """

    # Load pickled features/labels and move them onto the target device.
    tr_feat = device_exchange(file_io.load_pickled_features(tr_feat_path),
                              device=device)
    tr_labels = device_exchange(file_io.load_pickled_features(tr_labels_path),
                                device=device)
    val_feat = device_exchange(file_io.load_pickled_features(val_feat_path),
                               device=device)
    val_labels = device_exchange(
        file_io.load_pickled_features(val_labels_path), device=device)

    # The same cross-entropy criterion is reused for pre-training and for all
    # three losses of the domain-adaptation step (see call below).
    loss_func = functional.cross_entropy

    # Source-domain feature extractor and its label classifier.
    non_adapted_cnn = Model().to(device)
    label_classifier = LabelClassifier(nb_class_labels).to(device)

    if not path.exists(result_dir):
        makedirs(result_dir)

    if do_pre_train:
        # Pre-trained weights are written to result_dir and read back from
        # the same location further down.
        state_dict_path = result_dir

        printing.info_msg('Pre-training step')

        # Jointly optimize the feature extractor and the label classifier.
        optimizer_source = torch.optim.Adam(
            list(non_adapted_cnn.parameters()) +
            list(label_classifier.parameters()),
            lr=1e-4)

        # Pre-training uses only the 'A' domain of the loaded data.
        pre_training.pre_training(model=non_adapted_cnn,
                                  label_classifier=label_classifier,
                                  optimizer=optimizer_source,
                                  tr_batch_size=tr_batch_size,
                                  val_batch_size=val_batch_size,
                                  tr_feat=tr_feat['A'],
                                  tr_labels=tr_labels['A'],
                                  val_feat=val_feat['A'],
                                  val_labels=val_labels['A'],
                                  epochs=epochs,
                                  criterion=loss_func,
                                  patience=patience,
                                  result_dir=state_dict_path)

        del optimizer_source

    else:
        printing.info_msg('Loading a pre-trained non-adapted model')
        state_dict_path = model_path

    if not path.exists(state_dict_path):
        raise ValueError(
            'The path for loading the pre trained model does not exist!')

    # Restore the (pre-)trained weights from disk in either branch.
    non_adapted_cnn.load_state_dict(
        torch.load(path.join(state_dict_path, 'non_adapted_cnn.pytorch')))
    label_classifier.load_state_dict(
        torch.load(path.join(state_dict_path, 'label_classifier.pytorch')))

    printing.info_msg('Training the Adversarial Adaptation Model')

    # The adapted (target) model starts from the non-adapted weights.
    target_cnn = Model().to(device)
    target_cnn.load_state_dict(non_adapted_cnn.state_dict())
    discriminator = Discriminator(2).to(device)

    target_model_opt = torch.optim.Adam(target_cnn.parameters(), lr=tgt_lr)
    discriminator_opt = torch.optim.Adam(discriminator.parameters(), lr=d_lr)

    domain_adaptation.domain_adaptation(
        non_adapted_cnn, target_cnn, label_classifier, discriminator,
        target_model_opt, discriminator_opt, loss_func, loss_func, loss_func,
        tr_feat, tr_labels, val_feat, val_labels, adapt_epochs, update_cnt,
        result_dir, adapt_patience, device, factor)
Example #24
0
from modules.view import View
from modules.model import Model

# Wire the model (game logic) to the view and draw the initial field.
view = View(10)  # create view - 10 is field size
model = Model(view)  # add logic

view.render()
Example #25
0
import argparse
from modules.model import Model

def _str2bool(value):
    """Interpret common command-line spellings of a boolean flag value."""
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ('1', 'true', 'yes', 'y', 't')


parser = argparse.ArgumentParser(description='train covid-diagnosis')
parser.add_argument('--model_name', required=True, help='choose model name')
parser.add_argument('--backbone',
                    required=True,
                    help='choose backbone for network')
parser.add_argument('--dataset',
                    required=True,
                    help='choose dataset from x-ray & CT scan data')
# BUG FIX: without a type converter, any supplied value (even the string
# "False") was truthy; parse the string into a real bool instead.
parser.add_argument('--grad_cam',
                    type=_str2bool,
                    default=False,
                    help='visualization of heat map')

args = parser.parse_args()

# Build the model from the CLI choices, attach the dataset, and train.
test_model = Model(args.model_name, args.backbone)
test_model.set_dataset(args.dataset)
test_model.train()
Example #26
0
def main():
    """Run the full training loop.

    Prepares the data loaders, optionally copies teacher weights into the
    model, then optimizes with gradient accumulation, logging to TensorBoard
    and periodically validating/checkpointing.  Relies on the module-level
    `hparams` and helpers (prepare_dataloaders, reorder_batch, lr_scheduling,
    get_writer, validate, save_checkpoint).
    """
    train_loader, val_loader, collate_fn = prepare_dataloaders(hparams)
    model = nn.DataParallel(Model(hparams)).cuda()

    # Optionally seed alpha scalars plus Embedding/Encoder weights from a
    # teacher checkpoint.
    if hparams.pretrained_embedding == True:
        state_dict = torch.load(
            f'{hparams.teacher_dir}/checkpoint_200000')['state_dict']
        for k, v in state_dict.items():
            if k == 'alpha1':
                model.alpha1.data = v

            if k == 'alpha2':
                model.alpha2.data = v

            # NOTE(review): model is an nn.DataParallel wrapper, so setattr
            # targets the wrapper rather than model.module — confirm intended.
            if 'Embedding' in k:
                setattr(model, k, v)

            if 'Encoder' in k:
                setattr(model, k, v)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=hparams.lr,
                                 betas=(0.9, 0.98),
                                 eps=1e-09)
    criterion = TransformerLoss()
    writer = get_writer(hparams.output_directory, hparams.log_directory)

    iteration, loss = 0, 0
    model.train()
    print("Training Start!!!")
    # One "step" is hparams.accumulation iterations (gradient accumulation).
    while iteration < (hparams.train_steps * hparams.accumulation):
        for i, batch in enumerate(train_loader):
            text_padded, text_lengths, mel_padded, mel_lengths, align_padded = [
                reorder_batch(x, hparams.n_gpus).cuda() for x in batch
            ]
            mel_loss, duration_loss = model(text_padded, mel_padded,
                                            align_padded, text_lengths,
                                            mel_lengths, criterion)

            # Reduce the per-replica losses to scalars before combining.
            mel_loss, duration_loss = [
                torch.mean(x) for x in [mel_loss, duration_loss]
            ]
            # Scale down so the accumulated gradient matches a full batch.
            sub_loss = (mel_loss + duration_loss) / hparams.accumulation
            sub_loss.backward()
            loss = loss + sub_loss.item()

            iteration += 1
            # Optimizer step only once per accumulation window.
            if iteration % hparams.accumulation == 0:
                lr_scheduling(optimizer, iteration // hparams.accumulation)
                torch.nn.utils.clip_grad_norm_(model.parameters(),
                                               hparams.grad_clip_thresh)
                optimizer.step()
                model.zero_grad()
                writer.add_scalar('mel_loss',
                                  mel_loss.item(),
                                  global_step=iteration //
                                  hparams.accumulation)
                writer.add_scalar('duration_loss',
                                  duration_loss.item(),
                                  global_step=iteration //
                                  hparams.accumulation)
                loss = 0

            if iteration % (hparams.iters_per_validation *
                            hparams.accumulation) == 0:
                validate(model, criterion, val_loader, iteration, writer)

            if iteration % (hparams.iters_per_checkpoint *
                            hparams.accumulation) == 0:
                save_checkpoint(
                    model,
                    optimizer,
                    hparams.lr,
                    iteration // hparams.accumulation,
                    filepath=
                    f'{hparams.output_directory}/{hparams.log_directory}')

            if iteration == (hparams.train_steps * hparams.accumulation):
                break
Example #27
0
def train(train_file, validation_file, batch_size, epoch_limit, file_name,
          gpu_mode):
    """Train the pileup model and checkpoint it after every epoch.

    Parameters:
        train_file (str): path to the training pileup data.
        validation_file (str): path to the validation pileup data.
        batch_size (int): mini-batch size.
        epoch_limit (int): number of epochs to run.
        file_name (str): prefix for checkpoint / final model file names.
        gpu_mode (bool): run on GPU(s) via DataParallel when True.
    """

    transformations = transforms.Compose([transforms.ToTensor()])

    sys.stderr.write(TextColor.PURPLE + 'Loading data\n' + TextColor.END)
    train_data_set = PileupDataset(train_file, transformations)
    train_loader = DataLoader(train_data_set,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=16,
                              pin_memory=gpu_mode)
    sys.stderr.write(TextColor.PURPLE + 'Data loading finished\n' +
                     TextColor.END)

    model = Model()
    if gpu_mode:
        model = torch.nn.DataParallel(model).cuda()

    # Loss and Optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001)

    # Train the Model
    sys.stderr.write(TextColor.PURPLE + 'Training starting\n' + TextColor.END)
    seq_len = 3
    iteration_jump = 1
    for epoch in range(epoch_limit):
        total_loss = 0
        total_images = 0
        total_could_be = 0
        for i, (images, labels) in enumerate(train_loader):
            hidden = model.init_hidden(images.size(0))
            # if batch size not distributable among all GPUs then skip
            if gpu_mode is True and images.size(0) % 8 != 0:
                continue

            images = Variable(images, requires_grad=False)
            labels = Variable(labels, requires_grad=False)
            if gpu_mode:
                images = images.cuda()
                labels = labels.cuda()

            # Slide a seq_len-high window down the image rows.
            for row in range(0, images.size(2), iteration_jump):
                # segmentation of image. Currently using seq_len
                if row + seq_len > images.size(2):
                    continue

                x = images[:, :, row:row + seq_len, :]
                y = labels[:, row:row + seq_len]

                total_variation = torch.sum(y).data[0]
                total_could_be += batch_size

                # Sub-sampling: keep only ~5% of zero-variation windows and
                # randomly drop some low-variation windows.
                if total_variation == 0 and random.uniform(0, 1) * 100 > 5:
                    continue
                elif random.uniform(0,
                                    1) < total_variation / batch_size < 0.02:
                    continue

                # Forward + Backward + Optimize
                optimizer.zero_grad()
                outputs = model(x, hidden)
                hidden = repackage_hidden(hidden)

                loss = criterion(outputs.contiguous().view(-1, 3),
                                 y.contiguous().view(-1))
                loss.backward()
                optimizer.step()

                # loss count
                total_images += batch_size
                total_loss += loss.data[0]

            # BUG FIX: guard the running average — the sub-sampling above can
            # skip every window, leaving total_images at 0 and the old
            # unconditional division raising ZeroDivisionError.
            avg_loss = total_loss / total_images if total_images else 0
            sys.stderr.write(TextColor.BLUE + "EPOCH: " + str(epoch) +
                             " Batches done: " + str(i + 1))
            sys.stderr.write(" Loss: " + str(avg_loss) + "\n" + TextColor.END)
            print(str(epoch) + "\t" + str(i + 1) + "\t" + str(avg_loss))

        # After each epoch do validation
        validate(validation_file, batch_size, gpu_mode, model, seq_len)
        sys.stderr.write(TextColor.YELLOW + 'Could be: ' +
                         str(total_could_be) + ' Chosen: ' +
                         str(total_images) + "\n" + TextColor.END)
        sys.stderr.write(TextColor.YELLOW + 'EPOCH: ' + str(epoch))
        avg_loss = total_loss / total_images if total_images else 0
        sys.stderr.write(' Loss: ' + str(avg_loss) + "\n" + TextColor.END)
        torch.save(model, file_name + '_checkpoint_' + str(epoch) + '.pkl')
        torch.save(
            model.state_dict(),
            file_name + '_checkpoint_' + str(epoch) + '-params' + '.pkl')

    sys.stderr.write(TextColor.PURPLE + 'Finished training\n' + TextColor.END)
    torch.save(model, file_name + '_final.pkl')

    sys.stderr.write(TextColor.PURPLE + 'Model saved as:' + file_name +
                     '.pkl\n' + TextColor.END)
    torch.save(model.state_dict(), file_name + '_final_params' + '.pkl')

    sys.stderr.write(TextColor.PURPLE + 'Model parameters saved as:' +
                     file_name + '-params.pkl\n' + TextColor.END)
Example #28
0
                    help='path to config')

# Parse CLI arguments and build the application-wide logger.
args = parser.parse_args()
config_path = args.config
logger = get_logger(name=ROOT_LOGGER_NAME,
                    console=True,
                    log_level="INFO",
                    propagate=False)

# Load the JSON config that drives both the model and the API metadata.
logger.info(f"Reading config from {Path(config_path).absolute()}")
with open(config_path) as con_file:
    config = json.load(con_file)
logger.info(f"Using config {config}")

# Instantiate the rating model; every config key is forwarded as a kwarg.
logger.info(f"Loading model {config.get('model_name')}...")
model = Model(logger, **config)

# setting the api
app = Flask(__name__)
CORS(app)
api = Api(app,
          version=config.get("api_version", "0.0"),
          title='Int20h Final Submission')
ns1 = api.namespace('rating_model',
                    description=config.get('model_name', 'Model'))

# response format
response = api.model(
    'model_response', {
        'book_rating':
        fields.Float(required=True, description='neutral class probability'),
# Script configuration for checkpoint loading / alignment extraction.
data_type_ = 'char'
data_type = 'phone'
checkpoint_path = f"training_log/aligntts/stage0/checkpoint_40000"

from glob import glob

# checkpoint_path = sorted(glob("training_log/aligntts/stage0/checkpoint_*"))[0]
# NOTE(review): reassignment below overrides the f-string path above with an
# identical literal — presumably leftover from switching off the glob.
checkpoint_path = "training_log/aligntts/stage0/checkpoint_40000"

print(checkpoint_path)

# Re-key the checkpoint's state dict, dropping the first 7 characters of each
# key (presumably the "module." DataParallel prefix — confirm).
state_dict = {}
for k, v in torch.load(checkpoint_path)['state_dict'].items():
    state_dict[k[7:]] = v

model = Model(hparams).cuda()
model.load_state_dict(state_dict)
_ = model.cuda().eval()
criterion = MDNLoss()

import time

datasets = ['train', 'val', 'test']
batch_size = 64
batch_size = 1  # NOTE(review): overrides the 64 just above — keep only one?

start = time.perf_counter()

for dataset in datasets:

    with open(f'filelists/ljs_audio_text_{dataset}_filelist.txt',