Example #1
def initModels(args):
    field = Field(fieldW, fieldH, RabitN, FoxN, Mode.Initialization)

    # initial states plus placeholder labels for both agent types
    dataR = toNpArray(field.getStatesR())
    labelR = generateDummyLabels(RabitN)

    dataF = toNpArray(field.getStatesF())
    labelF = generateDummyLabels(FoxN)

    train(dataR, labelR, True, True)   # rabits
    train(dataF, labelF, False, True)  # foxes
    return None
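Example #1 relies on two helpers that are not shown. A minimal sketch of what they might look like; the float dtype and the nine-move action space (8 neighbouring cells plus "stay") are assumptions, not the project's actual code:

import numpy as np

def toNpArray(states):
    # Stack per-agent state vectors into a single 2-D float array.
    return np.asarray(states, dtype=np.float32)

def generateDummyLabels(n, num_moves=9):
    # One placeholder one-hot label per agent; num_moves is a guess.
    labels = np.zeros((n, num_moves), dtype=np.float32)
    labels[:, 0] = 1.0
    return labels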
Example #2
def trainFromFile(args):
    stepsCount = None  # fall back to the default step count when no argument is given
    if len(args) > 0:
        stepsCount = int(args[0])

    dataR = loadNpArrayFromFile(pathToDataR)
    labelR = loadNpArrayFromFile(pathToLabelR)
    if labelR.ndim == 3:
        labelR = labelR[:, 0, :]
    train(dataR, labelR, True, False, stepsCount)

    dataF = loadNpArrayFromFile(pathToDataF)
    labelF = loadNpArrayFromFile(pathToLabelF)
    if labelF.ndim == 3:
        labelF = labelF[:, 0, :]
    train(dataF, labelF, False, False, stepsCount)

    return
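The load helper above is also not shown. Assuming the arrays were saved with np.save (.npy format), it can be as simple as:

import numpy as np

def loadNpArrayFromFile(path):
    # Assumes .npy files written with np.save(path, array).
    return np.load(path)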
Example #3
def main():
    logger = logging.getLogger('main')
    global args, best_loss1
    args = parser.parse_args()
    vplt = Dashboard(server='http://127.0.0.1', port=8099, env=args.env)

    if not os.path.exists(os.path.join('checkpoints', args.env)):
        os.makedirs(os.path.join('checkpoints', args.env))

    # create model
    model = densenet()

    # loss function and optimizer
    criterion = torch.nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    model = torch.nn.DataParallel(model).cuda()

    # history dicts and best loss (overwritten below if we resume from a checkpoint);
    # initializing them here avoids unbound names when the checkpoint file is missing
    best_loss1 = float('inf')
    train_loss = {}
    val_loss = {}
    train_acc = {}
    val_acc = {}

    # optionally resume from a checkpoint
    if args.resume:
        if args.resume_path is None:
            args.resume_path = os.path.join('checkpoints', args.env,
                                            'latest.pth.tar')

        if os.path.isfile(args.resume_path):
            logger.info("=> loading checkpoint '{}'".format(args.resume_path))
            checkpoint = torch.load(args.resume_path)
            args.start_epoch = checkpoint['epoch']
            best_loss1 = checkpoint['best_loss1']
            model.load_state_dict(checkpoint['state_dict'])
            train_loss = checkpoint['train_loss']
            val_loss = checkpoint['val_loss']
            train_acc = checkpoint['train_acc']
            val_acc = checkpoint['val_acc']
            # optimizer.load_state_dict(checkpoint['optimizer'])
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume_path, checkpoint['epoch']))
        else:
            logger.info("=> no checkpoint found at '{}'".format(
                args.resume_path))

    logger.info('=> loading dataset')

    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        'dataset/data',
        train=True,
        download=False,
        transform=transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, )),
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=5)

    val_loader = torch.utils.data.DataLoader(datasets.MNIST(
        'dataset/data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, )),
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=5)

    if args.evaluate:
        validate(args=args,
                 val_loader=val_loader,
                 criterion1=criterion,
                 model=model)
        return

    for epoch in range(args.start_epoch, args.epochs):

        adjust_learning_rate(optimizer, args.lr, epoch)

        # train for one epoch
        train_loss_epoch, train_acc_epoch = train(args=args,
                                                  train_loader=train_loader,
                                                  model=model,
                                                  optimizer=optimizer,
                                                  criterion=criterion,
                                                  epoch=epoch)

        # evaluate on validation set
        val_loss_epoch, val_acc_epoch = validate(args=args,
                                                 val_loader=val_loader,
                                                 criterion1=criterion,
                                                 model=model)

        train_loss[epoch] = train_loss_epoch
        val_loss[epoch] = val_loss_epoch
        train_acc[epoch] = train_acc_epoch
        val_acc[epoch] = val_acc_epoch

        # visualization
        vplt.draw(train_loss, val_loss, 'Loss')
        vplt.draw(train_acc, val_acc, 'Accuracy')

        # remember best loss and save checkpoint
        is_best = val_loss_epoch < best_loss1
        best_loss1 = min(val_loss_epoch, best_loss1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_loss1': best_loss1,
                'optimizer': optimizer.state_dict(),
                'train_loss': train_loss,
                'val_loss': val_loss,
                'train_acc': train_acc,
                'val_acc': val_acc,
            },
            is_best,
            filename='epoch_{}.pth.tar'.format(epoch + 1),
            dir=os.path.join('checkpoints', args.env),
            epoch=epoch)
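Example #3 calls save_checkpoint with filename, dir, and epoch keywords and later resumes from 'latest.pth.tar'. A sketch consistent with that call site; this is a guess at the helper, not the project's actual code (epoch is accepted but unused here):

import os
import shutil
import torch

def save_checkpoint(state, is_best, filename, dir, epoch):
    path = os.path.join(dir, filename)
    torch.save(state, path)
    # keep a rolling copy that --resume can always find
    shutil.copyfile(path, os.path.join(dir, 'latest.pth.tar'))
    if is_best:
        shutil.copyfile(path, os.path.join(dir, 'model_best.pth.tar'))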
Example #4
file_names = os.listdir(data_dir)


# Split data into train and validation
val_proportion = config["training"]["val_proportion"]
train_file_names, val_file_names = train_test_split(file_names, test_size=val_proportion, random_state=1, shuffle=True)

BATCH_SIZE = config["training"]["batch_size"]
# Create Dataloader
train_dataset = MelDataset(data_dir, train_file_names)
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collateFunction)

val_dataset = MelDataset(data_dir, val_file_names)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=collateFunction)

# Instantiate model
model = VAE(config["network"]["input_size"], config["network"]["latent_size"])


# Create optimizer and loss
learning_rate = config["training"]["lr"]
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)


data_loaders = {'train': train_loader, 'val': val_loader}
model = train(model, data_loaders, optimizer, config)
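collateFunction is not defined in the snippet. Since mel spectrograms usually vary in length, a plausible implementation pads each batch to its longest item; the item shape and return signature here are assumptions:

import torch
from torch.nn.utils.rnn import pad_sequence

def collateFunction(batch):
    # Assumes each dataset item is a (time, n_mels) float tensor.
    lengths = torch.tensor([item.shape[0] for item in batch])
    padded = pad_sequence(batch, batch_first=True)  # (batch, max_time, n_mels)
    return padded, lengths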
Example #5
# define optimizer: Adam when requested, otherwise fall back to SGD
if optimizer.lower() == 'adam':
    optimizer = optim.Adam(model.parameters(), lr=lr)
else:
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)

best_valid_loss = np.inf
iteration = 0
epoch = 1

# training with early stopping (the hard-coded 5 could be args.epochs + 1)
while (epoch <= 5) and (iteration < patience):
    train(train_loader, model, optimizer, epoch, cuda, log_interval)
    valid_loss = test_validation(valid_loader, model, cuda)
    if valid_loss > best_valid_loss:
        iteration += 1
        print('Loss was not improved, iteration {0}'.format(str(iteration)))
    else:
        iteration = 0
        best_valid_loss = valid_loss
    epoch += 1

# collect predictions on the test set ("write test y")
model.eval()
predictions = []
with torch.no_grad():
    for data, target in test_loader:
        if torch.cuda.is_available():
            data, target = data.cuda(), target.cuda()
        output = model(data)
        predictions.append(output.argmax(dim=1).cpu())
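The snippet ends before anything is actually written. One way to finish the "write test y" step; the file name and format are assumptions:

import numpy as np

y_pred = torch.cat(predictions).numpy()
np.savetxt('test_y.csv', y_pred, fmt='%d')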
Example #6
File: main.py Project: neillu23/End2End
    # freeze the pre-trained ASR model so only the SE model is updated
    for param in model.ASRmodel.parameters():
        param.requires_grad = False

    # if args.retrain:
    #     args.epochs = args.re_epochs

    try:
        if args.mode == 'train':
            if args.corpus == "TMHINT_DYS":
                # --adim, default=384, type=int, "Number of attention transformation dimensions"
                optimizer = get_std_opt(
                    model.SEmodel.parameters(), 384,
                    model.SEmodel.args.transformer_warmup_steps,
                    model.SEmodel.args.transformer_lr)
            train(model, args.epochs, epoch, best_loss, optimizer, device,
                  loader, writer, args.model_path, args)

        # mode=="test"
        else:
            test(model, device, args.test_noisy, args.test_clean, asr_dict,
                 args.enhance_path, args.score_path, args)

    except KeyboardInterrupt:
        state_dict = {
            'epoch': epoch,
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'best_loss': best_loss
        }
        check_folder(args.checkpoint_path)
        torch.save(state_dict, args.checkpoint_path)
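check_folder is not shown. Given that args.checkpoint_path is a file path, it presumably creates the parent directory before saving; a minimal sketch under that assumption:

import os

def check_folder(path):
    # Create the parent directory of a checkpoint file if it is missing.
    folder = os.path.dirname(path)
    if folder and not os.path.isdir(folder):
        os.makedirs(folder)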
Example #7
from arguments import get_args
from Trainer import train
import numpy as np
import random

if __name__ == "__main__":
    args = get_args()
    train(args)
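numpy and random are imported but never used here. If they were meant for reproducibility, a typical seeding helper would look like this (purely illustrative, not part of the original script):

def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed)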
Example #8
    def step(self):
        print("RCount " + str(self.aliveRabitsCount()))
        print("FCount " + str(self.aliveFoxesCount()))

        # stop the simulation once either population dies out
        if self.aliveRabitsCount() == 0 or self.aliveFoxesCount() == 0:
            self.running = False
            return
        
        labels = None
        data = None
        agentsFeedback = None
        agentType = -1

        if self.rabitsMove:
            agentType = AgentType.Rabit
            self.clearAgentsInFiledCells()  # scheduleRabit.step() will initialize the next filedCells with Rabits

            data = self.getStatesR()
            if self.mode == Mode.Reinforcement:
                movesP = predict(toNpArray(data), True, False)
                moves = self.applyMovesRandomization(movesP)
            else:
                moves = predict(toNpArray(data), True)

            # clear the moves of rabits that were eaten by foxes during the previous step
            if self.mode == Mode.Reinforcement:
                for i in range(self.num_rabits):
                    if self.rabits[i].isDead:
                        moves[i] = -1  # ignored during reinforcement because the rabit died on the previous step

            if self.mode == Mode.Training or self.mode == Mode.DataGeneration or self.mode == Mode.Reinforcement:  # get labels for rabits
                labels = self.getLablesR(data)
            
            #if self.mode==Mode.Visualization:
            #    self.describeSituation(data, moves)

            self.setNextPos(self.rabits, moves)
            self.scheduleRabit.step()

            if self.mode == Mode.Training:
                train(toNpArray(data), toNpArray(labels), True, False)  # train rabits

            # feedback is collected in every mode, not only Reinforcement
            agentsFeedback = self.getAgentsReinforcementFeedback(self.rabits)

            self.datacollector.collect(self)
        else:
            agentType = AgentType.Fox
            data = self.getStatesF()
            if self.mode == Mode.Reinforcement:
                movesP = predict(toNpArray(data), False, False)
                moves = self.applyMovesRandomization(movesP)
            else:
                moves = predict(toNpArray(data), False)

            # unlike the rabit branch, fox labels are computed in every mode
            labels = self.getLablesF(data)
            # calcMoves = [np.argmax(l) for l in labels]
            # errCounter = 0
            # for k in range(len(moves)):
            #     if(moves[k] != calcMoves[k]):
            #         if(labels[k][calcMoves[k]] != labels[k][moves[k]]):
            #             errCounter += 1
            
            self.setNextPos(self.foxes, moves)
            self.scheduleFox.step()

            self.increaseFoodInFiledCells()  # grass grows back in the cells

            if self.mode == Mode.Training:
                train(toNpArray(data), toNpArray(labels), False, False)  # train foxes

            # feedback is collected in every mode, not only Reinforcement
            agentsFeedback = self.getAgentsReinforcementFeedback(self.foxes)

            self.datacollector.collect(self)

        self.rabitsMove = not self.rabitsMove
        self.stepCounter += 1
        print("Field Step = " + str(self.stepCounter))
        return (agentType, data, labels, agentsFeedback, moves)
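applyMovesRandomization is not shown. Given that it is only used in Reinforcement mode, an epsilon-greedy variant is a natural fit; this is a sketch under that assumption, and the exploration rate and nine-move action space are guesses:

import numpy as np

def applyMovesRandomization(self, movesP, epsilon=0.1):
    # With probability epsilon, replace the predicted move with a random
    # one so that reinforcement learning keeps exploring.
    moves = np.asarray(movesP).copy()
    mask = np.random.rand(len(moves)) < epsilon
    moves[mask] = np.random.randint(0, 9, size=int(mask.sum()))
    return moves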