Code example #1
File: game.py Project: FUTURETECH6/Skywalker
    def init_screen(self):
        """Creating screen and initializing objects"""
        self.init_properties()
        pygame.init()
        size = [self.win_w, self.win_h]
        pygame.display.set_mode(size, pygame.OPENGL | pygame.DOUBLEBUF)
        pygame.display.set_caption("Skywalker")
        pygame.mouse.set_visible(False)
        gl.glEnable(gl.GL_DEPTH_TEST)
        gl.glEnable(gl.GL_CULL_FACE)
        self.light.enable()
        """ Load model """
        os.chdir('./materials/spaceship/')
        self.ship = Model("spaceship.obj", 0.4, [0, 0, 0], -270, 0, -180)
        os.chdir('../../')

        # os.chdir('./materials/Starship/')
        # self.ship = Model("Starship.obj", 0.01, [0, 0, 0], 90, 0, 180)
        # os.chdir('../../')

        # os.chdir('./materials/NCC-1701/')
        # self.ship = Model("NCC-1701_modified.obj", 1.2, [0, 0, 0], 90, 0, 180)
        # os.chdir('../../')

        # os.chdir('./materials/millenium-falcon/')
        # self.ship = Model("millenium-falcon_modified.obj", 1, [0, 0, 0], 90, 0, 0, using_left=True)
        # os.chdir('../../')

        for i in range(MAX_DISPLAY_AST):
            self.add_ast(isInit=True)
        self.ship_collider = Sphere(self.ship.radius, [0.0, 0.0, 0.0],
                                    [1, 1, 1], False)
        self.skybox.init_sky()
Code example #2
    def __init__(self,
                 df,
                 column_type,
                 embedding_dim=5,
                 n_layers=5,
                 dim_feedforward=100,
                 n_head=5,
                 dropout=0.15,
                 ns_exponent=0.75,
                 share_category=False,
                 use_pos=False,
                 device='cpu'):

        self.logger = create_logger(name="BERTable")

        self.col_type = {'numerical': [], 'categorical': [], 'vector': []}
        for i, data_type in enumerate(column_type):
            self.col_type[data_type].append(i)

        self.embedding_dim = embedding_dim
        self.use_pos = use_pos
        self.device = device

        self.vocab = Vocab(df, self.col_type, share_category, ns_exponent)

        vocab_size = {
            'numerical': len(self.vocab.item2idx['numerical']),
            'categorical': len(self.vocab.item2idx['categorical'])
        }

        vector_dims = [np.shape(df[col])[1] for col in self.col_type['vector']]
        tab_len = len(column_type)
        self.model = Model(vocab_size, self.col_type, use_pos, vector_dims,
                           embedding_dim, dim_feedforward, tab_len, n_layers,
                           n_head, dropout)
Code example #3
    def test_Moore(self):
        """Moore neighbourhood test."""
        lmZero, lmOne, lmTwo = LifeMap((8, 8)), LifeMap((8, 8)), LifeMap(
            (8, 8))
        lmZero.setCell(2, 3, 1)
        lmZero.setCell(2, 4, 1)
        lmZero.setCell(2, 5, 1)
        lmOne.setCell(1, 4, 1)
        lmOne.setCell(2, 4, 1)
        lmOne.setCell(3, 4, 1)
        lmTwo.setCell(2, 3, 1)
        lmTwo.setCell(2, 4, 1)
        lmTwo.setCell(2, 5, 1)

        model = Model(
            lmZero,
            RulesNearCells(2, 0, True, {
                (0, (5, 3)): 1,
                (1, (5, 3)): 1,
                (1, (6, 2)): 1,
            }))
        model.makeStep()
        self.assertEqual(model.getLifeMap().getCellMatrix(),
                         lmOne.getCellMatrix())
        model.makeStep()
        self.assertEqual(model.getLifeMap().getCellMatrix(),
                         lmTwo.getCellMatrix())
Code example #4
    def test_Margolis(self):
        """Margolisx neighbourhood test."""
        lmZero, lmOne, lmTwo = LifeMap((8, 8)), LifeMap((8, 8)), LifeMap(
            (8, 8))
        lmZero.setCell(0, 0, 1)
        lmZero.setCell(3, 1, 2)
        lmOne.setCell(0, 0, 2)
        lmOne.setCell(3, 1, 1)
        lmTwo.setCell(0, 0, 1)
        lmTwo.setCell(3, 1, 1)

        model = Model(
            lmZero,
            RulesSquares(
                None, {
                    (1, 0, 0, 0): (2, 0, 0, 0),
                    (0, 2, 0, 0): (0, 1, 0, 0),
                    (0, 0, 1, 0): (0, 0, 2, 0),
                    (0, 0, 0, 2): (0, 0, 0, 1),
                }))
        model.makeStep()
        self.assertEqual(model.getLifeMap().getCellMatrix(),
                         lmOne.getCellMatrix())
        model.makeStep()
        self.assertEqual(model.getLifeMap().getCellMatrix(),
                         lmTwo.getCellMatrix())
Code example #5
    def __init__(self, model_name, corpus_dataset):
        self._config = TrainConfig()
        self._model_name = model_name
        self._data_loader = corpus_dataset.get_data_loader(
            self._config.batch_size)
        self._vocabulary = corpus_dataset.vocabulary
        self._model = Model(vocabulary=corpus_dataset.vocabulary,
                            training=True)
        # TODO: Support for other optimizers
        self._optimizer = optim.Adam(self._model.parameters(),
                                     lr=self._config.learning_rate)
        self._global_step = -1

        self._train_logger = logging.getLogger('Train')
        logging.basicConfig(level=logging.INFO)
Code example #6
def predict(examples):

    # 1. Create the model
    print('(CLASSIFIER) Creating model...')
    model = Model()

    # 2. Load the classifier
    print('(CLASSIFIER) Loading model...')
    model.load()

    # 3. Compute the prediction
    prediction = model.predict(examples)
    print('(CLASSIFIER) Prediction obtained (' + str(prediction) + ')')

    return prediction
Code example #7
File: main.py Project: yashwanthl/fastapi-helloworld
def predict(emailtext: str, modelname: str):
    md = Model.deserialize(modelname)
    predict = md.predict([emailtext])
    if (predict[0] == 0):
        return {"SPAM": "NO"}
    if (predict[0] == 1):
        return {"SPAM": "YES"}
Code example #8
def main():
    train_loader, val_loader, collate_fn = prepare_dataloaders(hparams)
    model = nn.DataParallel(Model(hparams)).cuda()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=hparams.lr,
                                 betas=(0.9, 0.98),
                                 eps=1e-09)
    criterion = TransformerLoss()
    writer = get_writer(hparams.output_directory, hparams.log_directory)

    iteration, loss = 0, 0
    model.train()
    print("Training Start!!!")
    while iteration < (hparams.train_steps*hparams.accumulation):
        for i, batch in enumerate(train_loader):
            text_padded, text_lengths, mel_padded, mel_lengths, gate_padded = [
                reorder_batch(x, hparams.n_gpus).cuda() for x in batch
            ]

            mel_loss, bce_loss, guide_loss = model(text_padded,
                                                   mel_padded,
                                                   gate_padded,
                                                   text_lengths,
                                                   mel_lengths,
                                                   criterion)

            mel_loss, bce_loss, guide_loss=[
                torch.mean(x) for x in [mel_loss, bce_loss, guide_loss]
            ]
            sub_loss = (mel_loss+bce_loss+guide_loss)/hparams.accumulation
            sub_loss.backward()
            loss = loss+sub_loss.item()

            iteration += 1
            if iteration%hparams.accumulation == 0:
                lr_scheduling(optimizer, iteration//hparams.accumulation)
                nn.utils.clip_grad_norm_(model.parameters(), hparams.grad_clip_thresh)
                optimizer.step()
                model.zero_grad()
                writer.add_losses(mel_loss.item(),
                                  bce_loss.item(),
                                  guide_loss.item(),
                                  iteration//hparams.accumulation, 'Train')
                loss=0


            if iteration%(hparams.iters_per_validation*hparams.accumulation)==0:
                validate(model, criterion, val_loader, iteration, writer)

            if iteration%(hparams.iters_per_checkpoint*hparams.accumulation)==0:
                save_checkpoint(model,
                                optimizer,
                                hparams.lr,
                                iteration//hparams.accumulation,
                                filepath=f'{hparams.output_directory}/{hparams.log_directory}')

            if iteration==(hparams.train_steps*hparams.accumulation):
                break
Code example #9
File: main.py Project: yashwanthl/fastapi-helloworld
def train(modelname: str):
    ds = Dataset()
    emails = ds.get_data()
    md = Model()
    md.train(emails)
    md.serialize(modelname)
    return {"Hello": "World"}
Code example #10
File: game.py Project: FUTURETECH6/Skywalker
    def add_ast(self, isInit=False):
        """Adding asteroids to a random pos near the ship"""
        size = random.randint(AST_MIN_SIZE, AST_MAX_SIZE)
        pos_x = random.randint(self.ship.pos[0] - AST_RANGE,
                               self.ship.pos[0] + AST_RANGE)
        pos_y = random.randint(self.ship.pos[1]+AST_Y_MIN_INIT, self.ship.pos[1]+AST_Y_MAX_INIT) if isInit \
            else random.randint(self.ship.pos[1]+AST_Y_MIN, self.ship.pos[1]+AST_Y_MAX)
        pos_z = random.randint(self.ship.pos[2] - AST_RANGE,
                               self.ship.pos[2] + AST_RANGE)

        self.asteroids.append(
            Model("materials/ast_lowpoly2/ast_lowpoly2.obj", size,
                  [pos_x, pos_y, pos_z], random.randint(0, 360),
                  random.randint(0, 360), random.randint(0, 360), False, [
                      random.randint(-AST_MOVE_RANGE, AST_MOVE_RANGE),
                      random.randint(-AST_MOVE_RANGE, AST_MOVE_RANGE),
                      random.randint(-AST_MOVE_RANGE, AST_MOVE_RANGE)
                  ], random.randint(-AST_ROT_RANGE, AST_ROT_RANGE)))
        if len(self.asteroids) > MAX_DISPLAY_AST:
            self.asteroids.popleft()
Code example #11
def train():

    # 1. Create the model
    print('(TRAINER) Creating model...')
    model = Model()

    # 2. Train the classifier
    print('(TRAINER) Training model...')
    model.train()

    # 3. Save the classifier
    print('(TRAINER) Saving model...')
    model.save()

    return model
Code example #12
File: train.py Project: ttslr/BVAE-TTS
def main(args):
    train_loader, val_loader, collate_fn = prepare_dataloaders(hp)
    model = Model(hp).cuda()
    optimizer = torch.optim.Adamax(model.parameters(), lr=hp.lr)
    writer = get_writer(hp.output_directory, args.logdir)
    model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

    iteration = 0
    model.train()
    print(f"Training Start!!! ({args.logdir})")
    while iteration < (hp.train_steps):
        for i, batch in enumerate(train_loader):
            text_padded, text_lengths, mel_padded, mel_lengths = [ x.cuda() for x in batch ]
            recon_loss, kl_loss, duration_loss, align_loss = model(text_padded, mel_padded, text_lengths, mel_lengths)

            alpha=min(1, iteration/hp.kl_warmup_steps)
            with amp.scale_loss((recon_loss + alpha*kl_loss + duration_loss + align_loss), optimizer) as scaled_loss:
                scaled_loss.backward()

            iteration += 1
            lr_scheduling(optimizer, iteration)
            nn.utils.clip_grad_norm_(model.parameters(), hp.grad_clip_thresh)
            optimizer.step()
            model.zero_grad()
            writer.add_scalar('train_recon_loss', recon_loss, global_step=iteration)
            writer.add_scalar('train_kl_loss', kl_loss, global_step=iteration)
            writer.add_scalar('train_duration_loss', duration_loss, global_step=iteration)
            writer.add_scalar('train_align_loss', align_loss, global_step=iteration)

            if iteration % (hp.iters_per_validation) == 0:
                validate(model, val_loader, iteration, writer)

            if iteration % (hp.iters_per_checkpoint) == 0:
                save_checkpoint(model, optimizer, hp.lr, iteration, filepath=f'{hp.output_directory}/{args.logdir}')

            if iteration == (hp.train_steps):
                break
Code example #13
def main(args):
    train_loader, val_loader, collate_fn = prepare_dataloaders(hparams, stage=args.stage)

    if args.stage!=0:
        checkpoint_path = f"training_log/aligntts/stage{args.stage-1}/checkpoint_{hparams.train_steps[args.stage-1]}"
        state_dict = {}
        for k, v in torch.load(checkpoint_path)['state_dict'].items():
            state_dict[k[7:]]=v

        model = Model(hparams).cuda()
        model.load_state_dict(state_dict)
        model = nn.DataParallel(model).cuda()
    else:
        model = nn.DataParallel(Model(hparams)).cuda()

    criterion = MDNLoss()
    writer = get_writer(hparams.output_directory, f'{hparams.log_directory}/stage{args.stage}')
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=hparams.lr,
                                 betas=(0.9, 0.98),
                                 eps=1e-09)
    iteration, loss = 0, 0
    model.train()

    print(f'Stage{args.stage} Start!!! ({str(datetime.now())})')
    while True:
        for i, batch in enumerate(train_loader):
            if args.stage==0:
                text_padded, mel_padded, text_lengths, mel_lengths = [
                    reorder_batch(x, hparams.n_gpus).cuda() for x in batch
                ]
                align_padded=None
            else:
                text_padded, mel_padded, align_padded, text_lengths, mel_lengths = [
                    reorder_batch(x, hparams.n_gpus).cuda() for x in batch
                ]

            sub_loss = model(text_padded,
                             mel_padded,
                             align_padded,
                             text_lengths,
                             mel_lengths,
                             criterion,
                             stage=args.stage)
            sub_loss = sub_loss.mean()/hparams.accumulation
            sub_loss.backward()
            loss = loss+sub_loss.item()
            iteration += 1

            if iteration%hparams.accumulation == 0:
                lr_scheduling(optimizer, iteration//hparams.accumulation)
                nn.utils.clip_grad_norm_(model.parameters(), hparams.grad_clip_thresh)
                optimizer.step()
                model.zero_grad()
                writer.add_scalar('Train loss', loss, iteration//hparams.accumulation)
                loss=0

            if iteration%(hparams.iters_per_validation*hparams.accumulation)==0:
                validate(model, criterion, val_loader, iteration, writer, args.stage)

            if iteration%(hparams.iters_per_checkpoint*hparams.accumulation)==0:
                save_checkpoint(model,
                                optimizer,
                                hparams.lr,
                                iteration//hparams.accumulation,
                                filepath=f'{hparams.output_directory}/{hparams.log_directory}/stage{args.stage}')

            if iteration==(hparams.train_steps[args.stage]*hparams.accumulation):
                break

        if iteration==(hparams.train_steps[args.stage]*hparams.accumulation):
            break
            
    print(f'Stage{args.stage} End!!! ({str(datetime.now())})')
Code example #14
def initModel():
    """Init model instance."""
    map = LifeMap((50, 50))
    manager = RulesNearCells(2, None, True, {})
    return Model(map, manager)
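A short driver sketch using only the API visible in these snippets (LifeMap.setCell, RulesNearCells, Model.makeStep, getLifeMap, getCellMatrix); the import path and the stepping loop are assumptions:

from model import LifeMap, RulesNearCells, Model  # assumed import path

life_map = LifeMap((50, 50))
life_map.setCell(24, 25, 1)  # seed a line of three live cells
life_map.setCell(25, 25, 1)
life_map.setCell(26, 25, 1)

model = Model(life_map, RulesNearCells(2, None, True, {}))
for _ in range(10):          # advance the automaton ten generations
    model.makeStep()
print(model.getLifeMap().getCellMatrix())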
Code example #15
data_type_ = 'char'
data_type = 'phone'
checkpoint_path = f"training_log/aligntts/stage0/checkpoint_40000"

from glob import glob

# checkpoint_path = sorted(glob("training_log/aligntts/stage0/checkpoint_*"))[0]
checkpoint_path = "training_log/aligntts/stage0/checkpoint_40000"

print(checkpoint_path)

state_dict = {}
for k, v in torch.load(checkpoint_path)['state_dict'].items():
    state_dict[k[7:]] = v

model = Model(hparams).cuda()
model.load_state_dict(state_dict)
_ = model.cuda().eval()
criterion = MDNLoss()

import time

datasets = ['train', 'val', 'test']
batch_size = 64
batch_size = 1

start = time.perf_counter()

for dataset in datasets:

    with open(f'filelists/ljs_audio_text_{dataset}_filelist.txt',
Code example #16
def collect_model(spider, brand):
    #print("start collect model")
    for model_url in brand.model_urls:
        model = Model(brand.relative_brand_url, model_url)
        t = Thread(target=collect_generation, args=(spider, brand, model))
        t.start()
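The spawned threads are never joined here; a hedged variant that keeps the handles and waits for them, using only names already present in the snippet plus threading's standard join:

def collect_model(spider, brand):
    threads = []
    for model_url in brand.model_urls:
        model = Model(brand.relative_brand_url, model_url)
        t = Thread(target=collect_generation, args=(spider, brand, model))
        t.start()
        threads.append(t)
    for t in threads:  # wait for every per-model scraper to finish
        t.join()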
Code example #17
def train(train_file, validation_file, batch_size, epoch_limit, file_name,
          gpu_mode):

    transformations = transforms.Compose([transforms.ToTensor()])

    sys.stderr.write(TextColor.PURPLE + 'Loading data\n' + TextColor.END)
    train_data_set = PileupDataset(train_file, transformations)
    train_loader = DataLoader(train_data_set,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=16,
                              pin_memory=gpu_mode)
    sys.stderr.write(TextColor.PURPLE + 'Data loading finished\n' +
                     TextColor.END)

    model = Model()
    if gpu_mode:
        model = torch.nn.DataParallel(model).cuda()

    # Loss and Optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001)

    # Train the Model
    sys.stderr.write(TextColor.PURPLE + 'Training starting\n' + TextColor.END)
    seq_len = 3
    iteration_jump = 1
    for epoch in range(epoch_limit):
        total_loss = 0
        total_images = 0
        total_could_be = 0
        for i, (images, labels) in enumerate(train_loader):
            hidden = model.init_hidden(images.size(0))
            # if batch size not distributable among all GPUs then skip
            if gpu_mode is True and images.size(0) % 8 != 0:
                continue

            images = Variable(images, requires_grad=False)
            labels = Variable(labels, requires_grad=False)
            if gpu_mode:
                images = images.cuda()
                labels = labels.cuda()

            for row in range(0, images.size(2), iteration_jump):
                # segmentation of image. Currently using seq_len
                if row + seq_len > images.size(2):
                    continue

                x = images[:, :, row:row + seq_len, :]
                y = labels[:, row:row + seq_len]

                total_variation = torch.sum(y).data[0]
                total_could_be += batch_size
                # print(total_variation)

                if total_variation == 0 and random.uniform(0, 1) * 100 > 5:
                    continue
                elif random.uniform(0,
                                    1) < total_variation / batch_size < 0.02:
                    continue

                # print(x)
                # print(y)
                # exit()

                # Forward + Backward + Optimize
                optimizer.zero_grad()
                outputs = model(x, hidden)
                hidden = repackage_hidden(hidden)
                # print('Label: ', y.data[0])
                # print('Values:', outputs.data[0])
                # print(y.contiguous().view(-1))
                # exit()
                # outputs = outputs.view(1, outputs.size(0), -1) required for CTCLoss

                loss = criterion(outputs.contiguous().view(-1, 3),
                                 y.contiguous().view(-1))
                # print(outputs.contiguous().view(-1, 3).size())
                # print(y.contiguous().view(-1).size())
                # exit()
                loss.backward()
                optimizer.step()

                # loss count
                total_images += batch_size
                total_loss += loss.data[0]

            sys.stderr.write(TextColor.BLUE + "EPOCH: " + str(epoch) +
                             " Batches done: " + str(i + 1))
            sys.stderr.write(" Loss: " + str(total_loss / total_images) +
                             "\n" + TextColor.END)
            print(
                str(epoch) + "\t" + str(i + 1) + "\t" +
                str(total_loss / total_images))

        # After each epoch do validation
        validate(validation_file, batch_size, gpu_mode, model, seq_len)
        sys.stderr.write(TextColor.YELLOW + 'Could be: ' +
                         str(total_could_be) + ' Chosen: ' +
                         str(total_images) + "\n" + TextColor.END)
        sys.stderr.write(TextColor.YELLOW + 'EPOCH: ' + str(epoch))
        sys.stderr.write(' Loss: ' + str(total_loss / total_images) + "\n" +
                         TextColor.END)
        torch.save(model, file_name + '_checkpoint_' + str(epoch) + '.pkl')
        torch.save(
            model.state_dict(),
            file_name + '_checkpoint_' + str(epoch) + '-params' + '.pkl')

    sys.stderr.write(TextColor.PURPLE + 'Finished training\n' + TextColor.END)
    torch.save(model, file_name + '_final.pkl')

    sys.stderr.write(TextColor.PURPLE + 'Model saved as:' + file_name +
                     '.pkl\n' + TextColor.END)
    torch.save(model.state_dict(), file_name + '_final_params' + '.pkl')

    sys.stderr.write(TextColor.PURPLE + 'Model parameters saved as:' +
                     file_name + '-params.pkl\n' + TextColor.END)
Code example #18
from modules.feature_selectors import ExampleFeatureSelector

device = "cpu"
raw_data_path = None
processed_data_path = None
n_epochs = 10
feature_selector = ExampleFeatureSelector()
train_data = BioactivityData(raw_data_path, processed_data_path,
                             feature_selector)
train_loader = DataLoader(train_data, batch_size=16, shuffle=False)

valid_data = None
valid_loader = None

input_size = train_data[0][0].shape[0]
model = Model(input_size, dim=200, n_res_blocks=2).to(device)

optimizer = torch.optim.Adam(model.parameters())
loss_fn = torch.nn.BCELoss()


def training_epoch(loader, model, opt, loss_fn):
    for step, (x, y) in enumerate(loader):  # enumerate: the loader yields (x, y) batches
        x, y = x.to(device), y.to(device)
        pred = model(x)

        loss = loss_fn(pred, y)

        opt.zero_grad()
        loss.backward()
        opt.step()
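A minimal driver loop for training_epoch using the objects defined above; since raw_data_path and processed_data_path are left as None in this snippet, the loop is purely illustrative:

for epoch in range(n_epochs):
    training_epoch(train_loader, model, optimizer, loss_fn)
    # validation is skipped here because valid_loader is None in the snippet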
Code example #19
File: trove.py Project: purepitch/trove
"""
trove -  A program to store and lookup (encrypted) information
"""

import argparse
from modules.model import Model
from modules.view import View
from modules.controller import Controller

parser = argparse.ArgumentParser(
            description="store and lookup encrypted information")
parser.add_argument('--onecmd', nargs=1, help="run a single command")
parser.add_argument('--file', nargs=1, help="name of encrypted input file")

# Initialize Model
model = Model()
model.program_name = "trove"
model.version = "0.1"

# Initialize View
view = View()

# Parse command line arguments
args = parser.parse_args()

# Initialize Controller
controller = Controller(model, view)

# Say hello
controller.print_hello_message()
Code example #20
class BERTable():
    def __init__(self,
                 df,
                 column_type,
                 embedding_dim=5,
                 n_layers=5,
                 dim_feedforward=100,
                 n_head=5,
                 dropout=0.15,
                 ns_exponent=0.75,
                 share_category=False,
                 use_pos=False,
                 device='cpu'):

        self.logger = create_logger(name="BERTable")

        self.col_type = {'numerical': [], 'categorical': [], 'vector': []}
        for i, data_type in enumerate(column_type):
            self.col_type[data_type].append(i)

        self.embedding_dim = embedding_dim
        self.use_pos = use_pos
        self.device = device

        self.vocab = Vocab(df, self.col_type, share_category, ns_exponent)

        vocab_size = {
            'numerical': len(self.vocab.item2idx['numerical']),
            'categorical': len(self.vocab.item2idx['categorical'])
        }

        vector_dims = [np.shape(df[col])[1] for col in self.col_type['vector']]
        tab_len = len(column_type)
        self.model = Model(vocab_size, self.col_type, use_pos, vector_dims,
                           embedding_dim, dim_feedforward, tab_len, n_layers,
                           n_head, dropout)

    def pretrain(self,
                 df,
                 max_epochs=3,
                 lr=1e-4,
                 lr_weight={
                     'numerical': 0.33,
                     'categorical': 0.33,
                     'vector': 0.33
                 },
                 loss_clip=[0, 100],
                 n_sample=4,
                 mask_rate=0.15,
                 replace_rate=0.8,
                 batch_size=32,
                 shuffle=True,
                 num_workers=1):

        self.model.loss_clip = loss_clip
        self.logger.info("[-] Converting to indices")
        data = self.vocab.convert(df, num_workers)

        self.model.to(self.device)
        self.model.train()
        optimizer = torch.optim.Adam(self.model.parameters(), lr=float(lr))

        self.logger.info("[-] Start Pretraining")

        process_bar = tqdm(range(max_epochs),
                           desc=f"[Progress]",
                           total=max_epochs,
                           leave=True,
                           position=0)

        for epoch in process_bar:

            generator = create_dataloader(data,
                                          self.col_type,
                                          self.vocab,
                                          self.embedding_dim,
                                          self.use_pos,
                                          batch_size,
                                          num_workers,
                                          mask_rate=mask_rate,
                                          replace_rate=replace_rate,
                                          n_sample=n_sample,
                                          shuffle=shuffle)

            metric_bar = tqdm([0],
                              desc=f"[Metric]",
                              bar_format="{desc} {postfix}",
                              leave=False,
                              position=2)

            epoch_bar = tqdm(generator,
                             desc=f"[Epoch]",
                             leave=False,
                             position=1)

            loss_history = {'numerical': [], 'categorical': [], 'vector': []}

            for batch_data in epoch_bar:

                batch_data = transfer(batch_data, self.device)
                _, losses = self.model.forward(batch_data, mode='train')

                loss = sum([
                    losses[data_type] / len(self.col_type[data_type]) *
                    lr_weight[data_type] for data_type in self.col_type
                    if len(self.col_type[data_type]) > 0
                ])

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                display = ''
                for types in losses:
                    loss_history[types].append(losses[types].item())
                    display += f'{types}: {np.mean(loss_history[types]):5.2f} '
                metric_bar.set_postfix_str(display)

            process_bar.write(f'[Log] Epoch {epoch:0>2d}| ' + display)
            epoch_bar.close()
            metric_bar.close()

        process_bar.close()

        self.model.cpu()

    # def transform(self, df, batch_size=32, num_workers=1):
    #     self.logger.info("[-] Converting to indices")
    #     data = self.vocab.convert(df, num_workers)

    #     generator = create_dataloader(
    #         data, self.col_type, self.vocab,
    #         self.embedding_dim, self.use_pos,
    #         batch_size, num_workers, mode='test')

    #     self.logger.info("[-] Start Transforming")

    #     process_bar = tqdm(
    #         generator,
    #         desc=f"[Process]",
    #         leave=False,
    #         position=0)

    #     self.model.to(self.device)
    #     self.model.eval()

    #     df_t = []
    #     for batch_data in process_bar:
    #         batch_data = transfer(batch_data, self.device)
    #         feature = self.model.forward(batch_data, mode='test')
    #         df_t += list(feature.cpu().detach().numpy())

    #     process_bar.close()
    #     self.model.cpu()

    #     return df_t

    def save(self, model_path='model.ckpt', vocab_path='vocab.pkl'):
        torch.save(self.model.state_dict(), model_path)
        with open(vocab_path, 'wb') as file:
            pkl.dump(self.vocab, file)
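A hedged end-to-end sketch of the BERTable workflow above; the column-wise layout of df (so that df[i] is the i-th column) and the import path are assumptions, since Vocab and create_dataloader are not shown here:

from bertable import BERTable  # assumed import path

# one numerical, one categorical and one 2-D vector column (illustrative data)
df = [
    [0.1, 0.5, 0.9, 1.3],
    ['a', 'b', 'a', 'c'],
    [[0.0, 1.0], [1.0, 0.0], [0.5, 0.5], [0.2, 0.8]],
]
column_type = ['numerical', 'categorical', 'vector']

bertable = BERTable(df, column_type, embedding_dim=5, device='cpu')
bertable.pretrain(df, max_epochs=1, batch_size=2)  # short pretraining run
bertable.save('model.ckpt', 'vocab.pkl')           # weights + vocabulary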
Code example #21
    if debug_encoded:
        debug.test_encoded_net(0)
    
    if debug_encrypted:
        debug.test_encrypted_net(1)
        
else:

    ds = Dataset(verbosity = verbosity)
    (train, train_labels), (test, test_labels) = ds.load(2)
    
    exp = Exporter(verbosity = verbosity)
    # exp.exportBestOf(train, train_labels, test, test_labels, params, model_name="model15", num_test=10)
    
    model = exp.load(model_name='model15')
    
    test = test[:coeff_mod]
    test_labels = test_labels[:coeff_mod]
    
    cn = Cryptonet(test, test_labels, model, p_moduli, coeff_mod, precision, True)
    cn.evaluate()
    
    m = Model()
    acc = m.getAccuracy(model, test, test_labels)
    print("Original Accuracy: " + str(acc) + "%")





Code example #22
File: game.py Project: FUTURETECH6/Skywalker
class Game(object):
    """Main game class"""
    def __init__(self, width=1280, height=700):
        self.win_w = width
        self.win_h = height
        self.asteroids = deque()
        self.light = Light([0, -3, 3])
        self.skybox = Skybox(self.win_w, self.win_h)
        self.center = Sphere(0.1, [1.0, 0.0, 1.0], [1, 1, 1])
        self.paused = False
        self.score = 0
        self.camera = Camera()
        self.isRoaming = False

    def init_properties(self):
        """Initialization of game properties"""
        self.isplaying = False
        self.fps_view = False
        self.shield = HARD_INIT_SHIELD
        self.ast_speed = HARD_AST_INIT
        self.ship_speed = HARD_SHIP_INIT
        self.lean_speed = HARD_TILT_INIT

    def init_screen(self):
        """Creating screen and initializing objects"""
        self.init_properties()
        pygame.init()
        size = [self.win_w, self.win_h]
        pygame.display.set_mode(size, pygame.OPENGL | pygame.DOUBLEBUF)
        pygame.display.set_caption("Skywalker")
        pygame.mouse.set_visible(False)
        gl.glEnable(gl.GL_DEPTH_TEST)
        gl.glEnable(gl.GL_CULL_FACE)
        self.light.enable()
        """ Load model """
        os.chdir('./materials/spaceship/')
        self.ship = Model("spaceship.obj", 0.4, [0, 0, 0], -270, 0, -180)
        os.chdir('../../')

        # os.chdir('./materials/Starship/')
        # self.ship = Model("Starship.obj", 0.01, [0, 0, 0], 90, 0, 180)
        # os.chdir('../../')

        # os.chdir('./materials/NCC-1701/')
        # self.ship = Model("NCC-1701_modified.obj", 1.2, [0, 0, 0], 90, 0, 180)
        # os.chdir('../../')

        # os.chdir('./materials/millenium-falcon/')
        # self.ship = Model("millenium-falcon_modified.obj", 1, [0, 0, 0], 90, 0, 0, using_left=True)
        # os.chdir('../../')

        for i in range(MAX_DISPLAY_AST):
            self.add_ast(isInit=True)
        self.ship_collider = Sphere(self.ship.radius, [0.0, 0.0, 0.0],
                                    [1, 1, 1], False)
        self.skybox.init_sky()

    def main_loop(self):
        """Main game loop"""
        self.init_screen()
        pygame.time.set_timer(pygame.USEREVENT + 1,
                              HARD_ACC_TIME_SEP)  # accelerate
        pygame.time.set_timer(pygame.USEREVENT + 2, 1000)  # score++
        # pygame.time.set_timer(pygame.USEREVENT + 3, 1000)  # refresh asts
        has_played = False
        clock = pygame.time.Clock()
        while True:
            delta_time = clock.tick(60) / 10
            fps = int(clock.get_fps())
            event_list = pygame.event.get()
            for e in event_list:
                if e.type == pygame.QUIT:
                    pygame.quit()
                    quit()
                elif e.type == pygame.KEYDOWN:
                    """ Keyboard(Not Controller) """
                    if e.key == pygame.K_ESCAPE or e.key == pygame.K_q:
                        method.quit_program()
                    elif e.key == pygame.K_r:
                        self.isRoaming = not self.isRoaming
                    elif e.key == pygame.K_v:
                        self.fps_view = not self.fps_view
                    elif e.key == pygame.K_p:
                        self.paused = not self.paused
                    elif e.key == pygame.K_SPACE and not self.isplaying:
                        self.score = 0
                        self.isplaying = True
                        has_played = True
                # elif e.type == pygame.MOUSEMOTION:

                elif e.type == pygame.USEREVENT + 1 and self.isplaying and not self.paused:
                    self.ast_speed += HARD_AST_ACC
                    self.ship_speed += HARD_SHIP_ACC
                    self.lean_speed += HARD_TILT_ACC
                elif e.type == pygame.USEREVENT + 2 and self.isplaying and not self.paused:
                    self.score += 1
                # elif e.type == pygame.USEREVENT + 3 and self.isplaying and fps > 24 and not self.paused:
                #     self.add_ast()
            """ Control & Display update """
            if not self.paused:
                if self.isplaying:
                    for ast in self.asteroids:
                        if method.collision_detection(self.ship_collider, ast):
                            self.shield -= 1
                            if self.shield <= 0:  # GAME OVER
                                self.init_properties()
                                self.ship.pos = [0, 0, 0]
                        if ast.pos[1] < -20:  # Reset Ast Pos
                            ast.pos[0] = random.uniform(
                                self.ship.pos[0] - AST_RANGE,
                                self.ship.pos[0] + AST_RANGE)
                            ast.pos[1] = random.randint(AST_Y_MIN, AST_Y_MAX)
                            ast.pos[2] = random.uniform(
                                self.ship.pos[2] - AST_RANGE,
                                self.ship.pos[2] + AST_RANGE)
                        else:
                            ast.pos[1] -= HARD_AST_INIT * delta_time
                        if ENABLE_AST_MOVING:
                            ast.pos[0] += ast.jiggle_speed[0]
                            ast.pos[1] += ast.jiggle_speed[1]
                            ast.pos[2] += ast.jiggle_speed[2]
                        ast.rotate()

                    method.ship_update(self.ship, self.ship_speed,
                                       self.lean_speed, delta_time)
                    self.ship_collider.pos = self.ship.pos[:]
                    self.display(delta_time)
                    method.draw_text([40, self.win_h - 50], str(self.score),
                                     30)
                    method.draw_text([self.win_w - 130, self.win_h - 50],
                                     "FPS: " + str(fps), 30)
                    method.draw_text(
                        [int(self.win_w / 2 - 200), self.win_h - 60],
                        "Shield: " + ">" * self.shield, 30, False,
                        (92, 207, 230))

                else:  # Start or Game Over
                    self.start_screen(delta_time, self.ast_speed)
                    method.draw_text([40, 40], "Esc to exit", 25, False,
                                     (255, 0, 0))
                    if has_played:  # Game Over
                        method.draw_text(
                            [int(self.win_w / 2),
                             int(self.win_h / 3 * 2)], "Game Over", 60, True,
                            (255, 174, 87))
                        method.draw_text(
                            [int(self.win_w / 2),
                             int(self.win_h / 3)],
                            "You scored: " + str(self.score), 40, True)
                    method.draw_text(
                        [int(self.win_w / 2),
                         int(self.win_h / 2)], "Press space to start", 50,
                        True)
                clock.tick(CLK_TICK)
                pygame.display.flip()
            else:
                if self.isRoaming:  # Paused and Roaming
                    self.camera.roam(event_list)
                    self.display(delta_time)
                    method.draw_text(
                        [int(self.win_w / 2),
                         int(self.win_h / 5 * 4)], "Roaming...", 40, True,
                        (255, 174, 87))
                    clock.tick(CLK_TICK)
                    pygame.display.flip()
                else:  # Just paused
                    method.draw_text(
                        [int(self.win_w / 2),
                         int(self.win_h / 3 * 2)], "Paused", 60, True,
                        (255, 174, 87))

    def display(self, delta_time):
        """Refreshing screen, clearing buffers, and redrawing objects"""  # Ciscenje medjuspremnika i ponovno crtanje objekata
        gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        glu.gluPerspective(VIEW_ANGLE, self.win_w / self.win_h, 0.1, 10000)

        if self.isRoaming:
            ctx = self.camera.eyex + sin(self.camera.polarAngle * D2R) * cos(
                self.camera.azimuthAngle * D2R)
            cty = self.camera.eyey + sin(self.camera.polarAngle * D2R) * sin(
                self.camera.azimuthAngle * D2R)
            ctz = self.camera.eyez + cos(self.camera.polarAngle * D2R)
            upx = -cos(self.camera.polarAngle * D2R) * cos(
                self.camera.azimuthAngle * D2R)
            upy = -cos(self.camera.polarAngle * D2R) * sin(
                self.camera.azimuthAngle * D2R)
            upz = sin(self.camera.polarAngle * D2R)
            glu.gluLookAt(self.camera.eyex, self.camera.eyey, self.camera.eyez,
                          ctx, cty, ctz, upx, upy, upz)
        else:
            self.camera.update(self.ship)
            if self.fps_view:
                glu.gluLookAt(self.ship.pos[0], self.ship.pos[1],
                              self.ship.pos[2], self.ship.pos[0],
                              self.ship.pos[1] + 100, self.ship.pos[2], 0, 0,
                              1)
            else:
                glu.gluLookAt(self.camera.eyex, self.camera.eyey,
                              self.camera.eyez + 3, self.ship.pos[0],
                              self.ship.pos[1] + 100, self.ship.pos[2], 0, 0,
                              1)
        self.skybox.sky_pos = self.ship.pos

        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()
        """ Skybox """
        self.light.disable()
        self.skybox.render(self.camera)
        self.light.enable()
        """ Ship """
        self.light.enable()
        self.center.pos = (self.ship.pos[0], self.ship.pos[1],
                           self.ship.pos[2])
        if not self.fps_view:
            self.light.disable()
            self.ship.render()
            self.light.enable()
        else:
            self.center.render()
        self.ship_collider.render()
        self.light.render()
        """ Ast """
        for ast in self.asteroids:
            ast.render()
        self.light.render()

    def start_screen(self, delta_time, speed):
        """Updating a welcome screen (like a screensaver)"""
        gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        glu.gluPerspective(80, self.win_w / self.win_h, 0.1, 10000)
        glu.gluLookAt(0, 0, 0, 0, 100, 0, 0, 0, 1)
        self.skybox.sky_pos = self.ship.pos
        self.light.disable()
        self.skybox.render(self.camera)
        self.light.enable()

        for ast in self.asteroids:
            if ast.pos[2] < -CAMERA_DIST - 10:
                ast.pos[0] = random.randint(
                    (int)(self.ship.pos[0]) - AST_RANGE,
                    (int)(self.ship.pos[0]) + AST_RANGE)
                ast.pos[1] = random.randint(
                    (int)(self.ship.pos[1]) + AST_Y_MIN_INIT,
                    (int)(self.ship.pos[1]) + AST_Y_MAX_INIT)
                ast.pos[2] = random.randint(
                    (int)(self.ship.pos[2]) - AST_RANGE,
                    (int)(self.ship.pos[2]) + AST_RANGE)
            else:
                ast.pos[1] -= speed * delta_time * 0.1
            ast.render()
        self.light.render()

    def add_ast(self, isInit=False):
        """Adding asteroids to a random pos near the ship"""
        size = random.randint(AST_MIN_SIZE, AST_MAX_SIZE)
        pos_x = random.randint(self.ship.pos[0] - AST_RANGE,
                               self.ship.pos[0] + AST_RANGE)
        pos_y = random.randint(self.ship.pos[1]+AST_Y_MIN_INIT, self.ship.pos[1]+AST_Y_MAX_INIT) if isInit \
            else random.randint(self.ship.pos[1]+AST_Y_MIN, self.ship.pos[1]+AST_Y_MAX)
        pos_z = random.randint(self.ship.pos[2] - AST_RANGE,
                               self.ship.pos[2] + AST_RANGE)

        self.asteroids.append(
            Model("materials/ast_lowpoly2/ast_lowpoly2.obj", size,
                  [pos_x, pos_y, pos_z], random.randint(0, 360),
                  random.randint(0, 360), random.randint(0, 360), False, [
                      random.randint(-AST_MOVE_RANGE, AST_MOVE_RANGE),
                      random.randint(-AST_MOVE_RANGE, AST_MOVE_RANGE),
                      random.randint(-AST_MOVE_RANGE, AST_MOVE_RANGE)
                  ], random.randint(-AST_ROT_RANGE, AST_ROT_RANGE)))
        if len(self.asteroids) > MAX_DISPLAY_AST:
            self.asteroids.popleft()
Code example #23
def evaluate(grid_search=False):

    # List of 6-tuples (model, params, accuracy, precision, recall, f1_score)
    results_list = []

    # Iterate over the model types
    for model_type in const.MODELS:
            
        print()
        print('(EVALUATOR) Evaluating model ' + model_type)

        if grid_search:

            # List of 6-tuples (model, params, accuracy, precision, recall, f1_score)
            grid_search_list = []
            param_space = get_parameter_space(model_type)

            for params in param_space:

                # 1. Create the model
                model = Model(model=model_type, params={'model': model_type, 'params': params})

                # 2. Train the classifier
                model.train()

                # 3. Evaluate the classifier
                accuracy, results, _, _ = model.evaluate()
                grid_search_list.append((model_type, params, accuracy, results['precision'], results['recall'], results['f1_score']))

            # Sort results by f1_score
            grid_search_list = sorted(grid_search_list, key=lambda x: x[5], reverse=True)

            print()
            print('(EVALUATOR) Grid search results -> Model - ', model_type)
            for _, params, accuracy, precision, recall, f1_score in grid_search_list:
                print()
                print("Params - ", params)
                print("-> F1 Score - ", "{0:.2f}".format(f1_score))
                print("-> Precision - ", "{0:.2f}".format(precision))
                print("-> Recall - ", "{0:.2f}".format(recall))
                print("-> Accuracy - ", "{0:.2f}".format(accuracy))
            print()

            best_params = grid_search_list[0][1]
            best_accuracy = grid_search_list[0][2]
            best_precision = grid_search_list[0][3]
            best_recall = grid_search_list[0][4]
            best_f1_score = grid_search_list[0][5]
            results_list.append((model_type, best_params, best_accuracy, best_precision, best_recall, best_f1_score))

        else:

            # 1. Create the model
            model = Model(model=model_type)

            # 2. Train the classifier
            model.train()

            # 3. Evaluate the classifier
            accuracy, results, _, _ = model.evaluate()
            results_list.append((model_type, None, accuracy, results['precision'], results['recall'], results['f1_score']))

    # Sort results by f1_score
    results_list = sorted(results_list, key=lambda x: x[5], reverse=True)

    # Show the results
    print()
    print('(EVALUATOR) Sorted results: ')
    for model, params, accuracy, precision, recall, f1_score in results_list:
        print()
        print("Model - ", model)
        if params is not None:
            print("Params - ", params)
        print("-> F1 Score - ", "{0:.2f}".format(f1_score))
        print("-> Precision - ", "{0:.2f}".format(precision))
        print("-> Recall - ", "{0:.2f}".format(recall))
        print("-> Accuracy - ", "{0:.2f}".format(accuracy))
    print()

    best_solution = {
        'model': results_list[0][0],
        'params': results_list[0][1]
    }

    # Pick the best model, train it on the full data, and save it
    model = Model(model=results_list[0][0], params=best_solution)
    model.train()
    model.save()

    print('(EVALUATOR) Trained and saved best model')
Code example #24
File: train.py Project: ChenX17/aligntts
def main(args):
    train_loader, val_loader, collate_fn = prepare_dataloaders(
        hparams, stage=args.stage)
    initial_iteration = None
    if args.stage != 0:
        checkpoint_path = f"training_log/aligntts/stage{args.stage-1}/checkpoint_{hparams.train_steps[args.stage-1]}"

        if not os.path.isfile(checkpoint_path):
            print(f'{checkpoint_path} does not exist')
            checkpoint_path = sorted(
                glob(f"training_log/aligntts/stage{args.stage-1}/checkpoint_*")
            )[-1]
            print(f'Loading {checkpoint_path} instead')

        state_dict = {}
        for k, v in torch.load(checkpoint_path)['state_dict'].items():
            state_dict[k[7:]] = v

        model = Model(hparams).cuda()
        model.load_state_dict(state_dict)
        model = nn.DataParallel(model).cuda()
    else:
        if args.pre_trained_model != '':
            if not os.path.isfile(args.pre_trained_model):
                print(f'{args.pre_trained_model} does not exist')

            state_dict = {}
            for k, v in torch.load(
                    args.pre_trained_model)['state_dict'].items():
                state_dict[k[7:]] = v
            initial_iteration = torch.load(args.pre_trained_model)['iteration']
            model = Model(hparams).cuda()
            model.load_state_dict(state_dict)
            model = nn.DataParallel(model).cuda()
        else:

            model = nn.DataParallel(Model(hparams)).cuda()

    criterion = MDNLoss()
    writer = get_writer(hparams.output_directory,
                        f'{hparams.log_directory}/stage{args.stage}')
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=hparams.lr,
                                 betas=(0.9, 0.98),
                                 eps=1e-09)
    iteration, loss = 0, 0
    if initial_iteration is not None:
        iteration = initial_iteration
    model.train()

    print(f'Stage{args.stage} Start!!! ({str(datetime.now())})')
    while True:
        for i, batch in enumerate(train_loader):
            if args.stage == 0:
                text_padded, mel_padded, text_lengths, mel_lengths = [
                    reorder_batch(x, hparams.n_gpus).cuda() for x in batch
                ]
                align_padded = None
            else:
                text_padded, mel_padded, align_padded, text_lengths, mel_lengths = [
                    reorder_batch(x, hparams.n_gpus).cuda() for x in batch
                ]

            sub_loss = model(text_padded,
                             mel_padded,
                             align_padded,
                             text_lengths,
                             mel_lengths,
                             criterion,
                             stage=args.stage,
                             log_viterbi=args.log_viterbi,
                             cpu_viterbi=args.cpu_viterbi)
            sub_loss = sub_loss.mean() / hparams.accumulation
            sub_loss.backward()
            loss = loss + sub_loss.item()
            iteration += 1
            if iteration % 100 == 0:
                print(
                    f'[{str(datetime.now())}] Stage {args.stage} Iter {iteration:<6d} Loss {loss:<8.6f}'
                )

            if iteration % hparams.accumulation == 0:
                lr_scheduling(optimizer, iteration // hparams.accumulation)
                nn.utils.clip_grad_norm_(model.parameters(),
                                         hparams.grad_clip_thresh)
                optimizer.step()
                model.zero_grad()
                writer.add_scalar('Train loss', loss,
                                  iteration // hparams.accumulation)
                writer.add_scalar('Learning rate', get_lr(optimizer),
                                  iteration // hparams.accumulation)
                loss = 0

            if iteration % (hparams.iters_per_validation *
                            hparams.accumulation) == 0:
                validate(model, criterion, val_loader, iteration, writer,
                         args.stage)

            if iteration % (hparams.iters_per_checkpoint *
                            hparams.accumulation) == 0:
                save_checkpoint(
                    model,
                    optimizer,
                    hparams.lr,
                    iteration // hparams.accumulation,
                    filepath=
                    f'{hparams.output_directory}/{hparams.log_directory}/stage{args.stage}'
                )

            if iteration == (hparams.train_steps[args.stage] *
                             hparams.accumulation):
                break

        if iteration == (hparams.train_steps[args.stage] *
                         hparams.accumulation):
            break

    print(f'Stage{args.stage} End!!! ({str(datetime.now())})')
Code example #25
def main():
    train_loader, val_loader, collate_fn = prepare_dataloaders(hparams)
    model = nn.DataParallel(Model(hparams)).cuda()

    if hparams.pretrained_embedding == True:
        state_dict = torch.load(
            f'{hparams.teacher_dir}/checkpoint_200000')['state_dict']
        for k, v in state_dict.items():
            if k == 'alpha1':
                model.alpha1.data = v

            if k == 'alpha2':
                model.alpha2.data = v

            if 'Embedding' in k:
                setattr(model, k, v)

            if 'Encoder' in k:
                setattr(model, k, v)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=hparams.lr,
                                 betas=(0.9, 0.98),
                                 eps=1e-09)
    criterion = TransformerLoss()
    writer = get_writer(hparams.output_directory, hparams.log_directory)

    iteration, loss = 0, 0
    model.train()
    print("Training Start!!!")
    while iteration < (hparams.train_steps * hparams.accumulation):
        for i, batch in enumerate(train_loader):
            text_padded, text_lengths, mel_padded, mel_lengths, align_padded = [
                reorder_batch(x, hparams.n_gpus).cuda() for x in batch
            ]
            mel_loss, duration_loss = model(text_padded, mel_padded,
                                            align_padded, text_lengths,
                                            mel_lengths, criterion)

            mel_loss, duration_loss = [
                torch.mean(x) for x in [mel_loss, duration_loss]
            ]
            sub_loss = (mel_loss + duration_loss) / hparams.accumulation
            sub_loss.backward()
            loss = loss + sub_loss.item()

            iteration += 1
            if iteration % hparams.accumulation == 0:
                lr_scheduling(optimizer, iteration // hparams.accumulation)
                torch.nn.utils.clip_grad_norm_(model.parameters(),
                                               hparams.grad_clip_thresh)
                optimizer.step()
                model.zero_grad()
                writer.add_scalar('mel_loss',
                                  mel_loss.item(),
                                  global_step=iteration //
                                  hparams.accumulation)
                writer.add_scalar('duration_loss',
                                  duration_loss.item(),
                                  global_step=iteration //
                                  hparams.accumulation)
                loss = 0

            if iteration % (hparams.iters_per_validation *
                            hparams.accumulation) == 0:
                validate(model, criterion, val_loader, iteration, writer)

            if iteration % (hparams.iters_per_checkpoint *
                            hparams.accumulation) == 0:
                save_checkpoint(
                    model,
                    optimizer,
                    hparams.lr,
                    iteration // hparams.accumulation,
                    filepath=
                    f'{hparams.output_directory}/{hparams.log_directory}')

            if iteration == (hparams.train_steps * hparams.accumulation):
                break
Code example #26
def run_model(config):
    model = Model(config)
    model.create_model()
    model.train_model()
    return model
Code example #27
class Train:
    def __init__(self, model_name, corpus_dataset):
        self._config = TrainConfig()
        self._model_name = model_name
        self._data_loader = corpus_dataset.get_data_loader(
            self._config.batch_size)
        self._vocabulary = corpus_dataset.vocabulary
        self._model = Model(vocabulary=corpus_dataset.vocabulary,
                            training=True)
        # TODO: Support for other optimizers
        self._optimizer = optim.Adam(self._model.parameters(),
                                     lr=self._config.learning_rate)
        self._global_step = -1

        self._train_logger = logging.getLogger('Train')
        logging.basicConfig(level=logging.INFO)

    def train_step(self, input_seqs, input_lengths, target_seqs, masks):
        self._optimizer.zero_grad()
        step_loss, print_loss, _ = self._model(input_seqs, input_lengths,
                                               target_seqs, masks,
                                               self._global_step)

        self._train_logger.info('Step {}:  Training loss: {}'.format(
            self._global_step, print_loss))

        step_loss.backward()

        if self._config.use_gradient_clipping:
            _ = nn.utils.clip_grad_norm_(self._model.parameters(),
                                         self._config.gradient_clipping_value)

        self._optimizer.step()

    def train(self,
              num_steps,
              save_num_steps,
              save_folder='./data/models/train_dev'):

        if self._global_step < 0:
            self._global_step = 0
        elif self._global_step >= num_steps:
            logging.info(
                'Global step past number of steps requested. No training needed. Global Step = {}. '
                'Num training steps = {}'.format(self._global_step, num_steps))
            return

        stop_training = False

        while not stop_training:
            for input_seqs, input_lengths, target_seqs, masks in self._data_loader:
                self.train_step(input_seqs, input_lengths, target_seqs, masks)
                self._global_step += 1

                if self._global_step % save_num_steps == 0:
                    self.save_checkpoint(save_folder)
                    just_saved = True
                else:
                    just_saved = False

                if self._global_step >= num_steps:
                    stop_training = True
                    logging.info('Finished training at step {}'.format(
                        self._global_step))
                    if not just_saved:
                        self.save_checkpoint(save_folder)
                    break

    def save_checkpoint(self, save_folder):
        makedirs(save_folder, exist_ok=True)
        save_path = path.join(save_folder,
                              'checkpoint-{}.tar'.format(self._global_step))
        logging.info('Saving checkpoint at step {}'.format(self._global_step))
        torch.save(
            {
                'name': self._model_name,
                'global_step': self._global_step,
                'model': self._model.state_dict(),
                'optimizer': self._optimizer.state_dict(),
                'vocabulary': self._vocabulary.__dict__,
            }, save_path)
        logging.info('Checkpoint saved at {}'.format(save_path))

    @staticmethod
    def load_from_checkpoint(checkpoint_path, corpus_dataset):
        checkpoint = torch.load(checkpoint_path)
        train_obj = Train(checkpoint['name'], corpus_dataset)
        train_obj._vocabulary.__dict__ = checkpoint['vocabulary']
        train_obj._global_step = checkpoint['global_step']
        train_obj._model.load_state_dict(checkpoint['model'])
        train_obj._train_logger.info(
            'Restored from checkpoint {}'.format(checkpoint_path))
        return train_obj
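For orientation, here is a minimal usage sketch of the Train class above. The CorpusDataset wrapper, the model name, and the step counts are hypothetical placeholders rather than values taken from the snippet; only the save folder and the checkpoint naming follow the code shown.

# Hypothetical usage sketch; CorpusDataset and the argument values are placeholders.
corpus_dataset = CorpusDataset('./data/corpus')
trainer = Train('seq2seq-chatbot', corpus_dataset)
trainer.train(num_steps=10000, save_num_steps=1000,
              save_folder='./data/models/train_dev')

# Resuming from the checkpoint written at step 10000:
trainer = Train.load_from_checkpoint(
    './data/models/train_dev/checkpoint-10000.tar', corpus_dataset)
trainer.train(num_steps=20000, save_num_steps=1000)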
Code Example #28
0
def main():
    data_type = 'phone'
    checkpoint_path = f"training_log/aligntts/stage0/checkpoint_{hparams.train_steps[0]}"
    state_dict = {}

    # Strip the 'module.' prefix that DataParallel adds to parameter names.
    for k, v in torch.load(checkpoint_path)['state_dict'].items():
        state_dict[k[7:]] = v

    model = Model(hparams).cuda()
    model.load_state_dict(state_dict)
    model.eval()
    criterion = MDNLoss()

    #datasets = ['train', 'val', 'test']
    datasets = ['train']
    batch_size = 64

    for dataset in datasets:
        #with open(f'filelists/ljs_audio_text_{dataset}_filelist.txt', 'r') as f:
        with open(f'/hd0/speech-aligner/metadata/metadata.csv', 'r') as f:
            lines_raw = [line.split('|') for line in f.read().splitlines()]
            # Ceil division keeps the last partial batch without producing an
            # empty trailing batch.
            lines_list = [
                lines_raw[batch_size * i:batch_size * (i + 1)]
                for i in range((len(lines_raw) + batch_size - 1) // batch_size)
            ]

        for batch in tqdm(lines_list):
            file_list, text_list, mel_list = [], [], []
            text_lengths, mel_lengths = [], []

            for i in range(len(batch)):
                file_name, _, text = batch[i]
                file_name = os.path.splitext(file_name)[0]
                file_list.append(file_name)
                seq_dir = os.path.join(
                    '/hd0/speech-aligner/preprocessed/VCTK20_engspks',
                    f'{data_type}_seq')
                mel_dir = os.path.join(
                    '/hd0/speech-aligner/preprocessed/VCTK20_engspks',
                    'melspectrogram')

                seq = torch.from_numpy(
                    np.load(f'{seq_dir}/{file_name}_sequence.npy'))
                mel = torch.from_numpy(
                    np.load(f'{mel_dir}/{file_name}_melspectrogram.npy'))

                text_list.append(seq)
                mel_list.append(mel)
                text_lengths.append(seq.size(0))
                mel_lengths.append(mel.size(1))

            text_lengths = torch.LongTensor(text_lengths)
            mel_lengths = torch.LongTensor(mel_lengths)
            text_padded = torch.zeros(len(batch),
                                      text_lengths.max().item(),
                                      dtype=torch.long)
            mel_padded = torch.zeros(len(batch), hparams.n_mel_channels,
                                     mel_lengths.max().item())

            for j in range(len(batch)):
                text_padded[j, :text_list[j].size(0)] = text_list[j]
                mel_padded[j, :, :mel_list[j].size(1)] = mel_list[j]

            text_padded = text_padded.cuda()
            mel_padded = mel_padded.cuda()
            mel_padded = (
                torch.clamp(mel_padded, hparams.min_db, hparams.max_db) -
                hparams.min_db) / (hparams.max_db - hparams.min_db)
            text_lengths = text_lengths.cuda()
            mel_lengths = mel_lengths.cuda()

            with torch.no_grad():
                encoder_input = model.Prenet(text_padded)
                hidden_states, _ = model.FFT_lower(encoder_input, text_lengths)
                mu_sigma = model.get_mu_sigma(hidden_states)
                _, log_prob_matrix = criterion(mu_sigma, mel_padded,
                                               text_lengths, mel_lengths)

                align = model.viterbi(log_prob_matrix, text_lengths,
                                      mel_lengths).to(torch.long)
                alignments = list(torch.split(align, 1))

            for j, (l, t) in enumerate(zip(text_lengths, mel_lengths)):
                alignments[j] = alignments[j][0, :l.item(), :t.item()].sum(
                    dim=-1)
                os.makedirs(
                    "/hd0/speech-aligner/preprocessed/VCTK20_engspks/alignments/{}"
                    .format(file_list[j].split('/')[0]),
                    exist_ok=True)
                np.save(
                    f'/hd0/speech-aligner/preprocessed/VCTK20_engspks/alignments/{file_list[j]}_alignment.npy',
                    alignments[j].detach().cpu().numpy())
                # plt.imshow(align[j].detach().cpu().numpy())
                # plt.gca().invert_yaxis()
                # plt.savefig(f"/hd0/speech-aligner/preprocessed/VCTK20_engspks/alignments/{file_list[j]}_alignment.png", format='png')

    print("Alignments Extraction End!!! ({datetime.now()})")
Code Example #29
0
import argparse
from modules.model import Model

parser = argparse.ArgumentParser(description='train covid-diagnosis')
parser.add_argument('--model_name', required=True, help='name of the model')
parser.add_argument('--backbone',
                    required=True,
                    help='backbone network to use')
parser.add_argument('--dataset',
                    required=True,
                    help='dataset to train on (x-ray or CT scan data)')
parser.add_argument('--grad_cam',
                    action='store_true',
                    help='visualize heat maps with Grad-CAM')

args = parser.parse_args()

test_model = Model(args.model_name, args.backbone)
test_model.set_dataset(args.dataset)
test_model.train()
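As a quick illustration of the interface this parser exposes, the snippet below feeds it an explicit argument list; the model, backbone, and dataset names are placeholders rather than values documented by the project.

# Hypothetical smoke test of the parser defined above; values are placeholders.
example_args = parser.parse_args(
    ['--model_name', 'resnet50', '--backbone', 'resnet50', '--dataset', 'xray'])
print(example_args)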
Code Example #30
0
def training_process(device, nb_class_labels, model_path, result_dir, patience,
                     epochs, do_pre_train, tr_feat_path, tr_labels_path,
                     val_feat_path, val_labels_path, tr_batch_size,
                     val_batch_size, adapt_patience, adapt_epochs, d_lr,
                     tgt_lr, update_cnt, factor):
    """Implements the complete training process of the AUDASC method.

    :param device: The device that we will use.
    :type device: str
    :param nb_class_labels: The number of labels for label classification.
    :type nb_class_labels: int
    :param model_path: The path of a previously saved model (if any).
    :type model_path: str
    :param result_dir: The directory to save newly pre-trained model.
    :type result_dir: str
    :param patience: The patience for the pre-training step.
    :type patience: int
    :param epochs: The epochs for the pre-training step.
    :type epochs: int
    :param do_pre_train: Flag to indicate if we do pre-training.
    :type do_pre_train: bool
    :param tr_feat_path: The path for loading the training features.
    :type tr_feat_path: str
    :param tr_labels_path: The path for loading the training labels.
    :type tr_labels_path: str
    :param val_feat_path: The path for loading the validation features.
    :type val_feat_path: str
    :param val_labels_path: The path for loading the validation labels.
    :type val_labels_path: str
    :param tr_batch_size: The batch size used for pre-training.
    :type tr_batch_size: int
    :param val_batch_size: The batch size used for validation.
    :type val_batch_size: int
    :param adapt_patience: The patience for the domain adaptation step.
    :type adapt_patience: int
    :param adapt_epochs: The epochs for the domain adaptation step.
    :type adapt_epochs: int
    :param d_lr: The learning rate for the discriminator.
    :type d_lr: float
    :param tgt_lr: The learning rate for the adapted model.
    :type tgt_lr: float
    :param update_cnt: An update controller for the adversarial loss.
    :type update_cnt: int
    :param factor: The coefficient that multiplies the classification loss.
    :type factor: int
    """

    tr_feat = device_exchange(file_io.load_pickled_features(tr_feat_path),
                              device=device)
    tr_labels = device_exchange(file_io.load_pickled_features(tr_labels_path),
                                device=device)
    val_feat = device_exchange(file_io.load_pickled_features(val_feat_path),
                               device=device)
    val_labels = device_exchange(
        file_io.load_pickled_features(val_labels_path), device=device)

    loss_func = functional.cross_entropy

    non_adapted_cnn = Model().to(device)
    label_classifier = LabelClassifier(nb_class_labels).to(device)

    if not path.exists(result_dir):
        makedirs(result_dir)

    if do_pre_train:
        state_dict_path = result_dir

        printing.info_msg('Pre-training step')

        optimizer_source = torch.optim.Adam(
            list(non_adapted_cnn.parameters()) +
            list(label_classifier.parameters()),
            lr=1e-4)

        pre_training.pre_training(model=non_adapted_cnn,
                                  label_classifier=label_classifier,
                                  optimizer=optimizer_source,
                                  tr_batch_size=tr_batch_size,
                                  val_batch_size=val_batch_size,
                                  tr_feat=tr_feat['A'],
                                  tr_labels=tr_labels['A'],
                                  val_feat=val_feat['A'],
                                  val_labels=val_labels['A'],
                                  epochs=epochs,
                                  criterion=loss_func,
                                  patience=patience,
                                  result_dir=state_dict_path)

        del optimizer_source

    else:
        printing.info_msg('Loading a pre-trained non-adapted model')
        state_dict_path = model_path

    if not path.exists(state_dict_path):
        raise ValueError(
            'The path for loading the pre-trained model does not exist!')

    non_adapted_cnn.load_state_dict(
        torch.load(path.join(state_dict_path, 'non_adapted_cnn.pytorch')))
    label_classifier.load_state_dict(
        torch.load(path.join(state_dict_path, 'label_classifier.pytorch')))

    printing.info_msg('Training the Adversarial Adaptation Model')

    target_cnn = Model().to(device)
    target_cnn.load_state_dict(non_adapted_cnn.state_dict())
    discriminator = Discriminator(2).to(device)

    target_model_opt = torch.optim.Adam(target_cnn.parameters(), lr=tgt_lr)
    discriminator_opt = torch.optim.Adam(discriminator.parameters(), lr=d_lr)

    domain_adaptation.domain_adaptation(
        non_adapted_cnn, target_cnn, label_classifier, discriminator,
        target_model_opt, discriminator_opt, loss_func, loss_func, loss_func,
        tr_feat, tr_labels, val_feat, val_labels, adapt_epochs, update_cnt,
        result_dir, adapt_patience, device, factor)
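To make the long signature easier to read, here is a hypothetical call to training_process; every path and hyperparameter value below is an illustrative placeholder, not a setting from the original experiments.

# Hypothetical invocation; all paths and hyperparameters are illustrative only.
training_process(
    device='cuda:0',
    nb_class_labels=10,
    model_path='outputs/pretrained',
    result_dir='outputs/adapted',
    patience=10,
    epochs=100,
    do_pre_train=True,
    tr_feat_path='features/tr_feat.p',
    tr_labels_path='features/tr_labels.p',
    val_feat_path='features/val_feat.p',
    val_labels_path='features/val_labels.p',
    tr_batch_size=64,
    val_batch_size=64,
    adapt_patience=10,
    adapt_epochs=50,
    d_lr=1e-4,
    tgt_lr=1e-4,
    update_cnt=2,
    factor=1)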
Code Example #31
0
def analyze(company):
    """ This route responds when the user submits how many models they would like to train.
        It trains and predicts with a model as many times as the user specified and then
        redirects to the final results page.
        
        Parameters:
            company(str): stock symbol of the stock that the model will analyze
    """

    # Reads the user's submission of how many models they would like to train
    # and falls back to 1 if the input is not an integer between 1 and 3
    try:
        count = int(request.form['count'])
        if (count <= 0 or count > 3):
            count = 1
    except ValueError:
        count = 1

    # Reads the locally stored data and then removes the temporary files
    X_pred = pd.read_csv('prediction_data.csv')
    x = pd.read_csv('x.csv')
    y = pd.read_csv('y.csv')
    os.remove('prediction_data.csv')
    os.remove('x.csv')
    os.remove('y.csv')

    # Stores the final prediction and error of each model after it has
    # completed all of its training epochs
    predictions = []
    errors = []

    # Stores the predictions each model makes after each epoch
    prediction_json = []

    for i in range(0, count):

        reg = Model(len(x.columns))

        prediction_history, rmse = reg.train(x, y, X_pred)

        prediction_history.insert(0, 'Model ' + str(i + 1))
        prediction_json.append(prediction_history)

        Y_pred = reg.predict(X_pred)
        predictions.append(Y_pred)
        errors.append(rmse)

    # Average the predictions to get the final or "true" prediction/error
    true_prediction = sum(predictions) / len(predictions)
    true_error = sum(errors) / len(errors)

    # Saving result data so that it can be used in the next route
    session['predictions'] = prediction_json
    session['true_prediction'] = true_prediction
    session['true_error'] = true_error

    print('')
    print('********************')
    print('TRUE PREDICTION: ' + str(true_prediction))
    print('********************')
    print('')
    print('True Error: ' + str(true_error))
    print('')

    return redirect('/' + company + '/' + str(count) + '/results')