Example No. 1
 def __init__(self, gradient_accum_steps=5, lr=0.0005, epochs=100, n_class=2, lmb=30):
     self.device = self.set_cuda_device()
     self.net = EFN_Classifier("tf_efficientnet_b1_ns", n_class).to(self.device)
     self.loss_function = nn.MSELoss()
     self.clf_loss_function = nn.CrossEntropyLoss()
     self.optimizer = Ranger(self.net.parameters(), lr=lr, weight_decay=0.999, betas=(0.9, 0.999))
     self.scheduler = CosineAnnealingLR(self.optimizer, epochs * 0.5, lr * 0.0001)
     self.scheduler.last_epoch = epochs
     self.scaler = GradScaler()
     self.epochs = epochs
     self.gradient_accum_steps = gradient_accum_steps
     self.lmb = lmb
Example No. 2
 def make_optimizer(model, opt, lr, weight_decay, momentum, nesterov=True):
     if opt == 'SGD':
         optimizer = getattr(torch.optim, opt)(model.parameters(),
                                               lr=lr,
                                               weight_decay=weight_decay,
                                               momentum=momentum,
                                               nesterov=nesterov)
     elif opt == 'AMSGRAD':
         optimizer = getattr(torch.optim, 'Adam')(model.parameters(),
                                                  lr=lr,
                                                  weight_decay=weight_decay,
                                                  amsgrad=True)
     elif opt == 'Ranger':
         optimizer = Ranger(params=filter(lambda p: p.requires_grad,
                                          model.parameters()),
                            lr=lr)
     elif opt == 'RMS':
         optimizer = torch.optim.RMSprop(model.parameters(),
                                         lr=lr,
                                         alpha=0.99,
                                         eps=1e-08,
                                         weight_decay=weight_decay,
                                         momentum=momentum,
                                         centered=False)
     else:
         raise ValueError('Unsupported optimizer: {}'.format(opt))
     return optimizer
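A minimal usage sketch of the factory above, assuming make_optimizer is importable as a plain function (it is shown here without its class) and that torch and Ranger are available in that module; the toy model is purely illustrative:

import torch.nn as nn

model = nn.Linear(16, 4)  # illustrative stand-in for a real network
optimizer = make_optimizer(model, opt='SGD', lr=0.01, weight_decay=1e-4, momentum=0.9)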
Example No. 3
        def action(self):
            factory = self.__outer.unit()
            garrison = factory.structure_garrison()
            if garrison:
                direction = random.choice(list(bc.Direction))
                if self.__outer._gc.can_unload(factory.id, direction):
                    self.__outer._gc.unload(factory.id, direction)

                    location = factory.location.map_location().add(direction)
                    unit = self.__outer._gc.sense_unit_at_location(location)

                    if unit:  # TODO: Add other unit types' tree containers
                        strategy.Strategy.getInstance().removeInProduction(
                            unit.unit_type)
                        strategy.Strategy.getInstance().addUnit(unit.unit_type)
                        if unit.unit_type == bc.UnitType.Worker:
                            self.__outer._my_units.append(
                                Worker(unit.id, self.__outer._gc,
                                       self.__outer._maps,
                                       self.__outer._my_units))
                        elif unit.unit_type == bc.UnitType.Knight:
                            self.__outer._my_units.append(
                                Knight(unit.id, self.__outer._gc))
                        elif unit.unit_type == bc.UnitType.Healer:
                            self.__outer._my_units.append(
                                Healer(unit.id, self.__outer._gc,
                                       self.__outer._maps))
                        elif unit.unit_type == bc.UnitType.Ranger:
                            self.__outer._my_units.append(
                                Ranger(unit.id, self.__outer._gc))
                        elif unit.unit_type == bc.UnitType.Mage:
                            self.__outer._my_units.append(
                                Mage(unit.id, self.__outer._gc,
                                     self.__outer._maps))
            self._status = bt.Status.SUCCESS
Example No. 4
    def __init__(self, feature_size, n_classes):
        # Network architecture
        super(iCaRLNet, self).__init__()
        #         self.feature_extractor = EfficientNet.from_pretrained('efficientnet-b2')
        self.feature_extractor = resnet18()


        self.feature_extractor.fc =\
            nn.Linear(self.feature_extractor.fc.in_features, feature_size)
        self.bn = nn.BatchNorm1d(feature_size, momentum=0.01)
        self.ReLU = nn.ReLU()
        self.fc = nn.Linear(feature_size, n_classes, bias=False)
        self.fc2 = nn.Linear(1000, 2048)
        self.n_classes = n_classes
        self.n_known = 0

        # List containing exemplar_sets
        # Each exemplar_set is a np.array of N images
        # with shape (N, C, H, W)
        self.exemplar_sets = []

        # Learning method
        self.cls_loss = nn.CrossEntropyLoss()
        self.dist_loss = nn.BCELoss()
        self.optimizer = Ranger(self.parameters())
        #self.optimizer = optim.SGD(self.parameters(), lr=2.0,
        #                           weight_decay=0.00001)

        # Means of exemplars
        self.compute_means = True
        self.exemplar_means = []
Example No. 5
    def __init__(self, device, args):
        """
        Initializes a MAML few shot learning system
        :param im_shape: The images input size, in batch, c, h, w shape
        :param device: The device to use to use the model on.
        :param args: A namedtuple of arguments specifying various hyperparameters.
        """
        super(ReptileFewShotClassifier, self).__init__(device, args)

        # Init slow model
        config = AutoConfig.from_pretrained(args.pretrained_weights)
        config.num_labels = args.num_classes_per_set
        self.classifier = AutoModelForSequenceClassification.from_pretrained(
            args.pretrained_weights, config=config
        )

        # init optimizer
        self.optimizer = Ranger(
            self.classifier.parameters(), lr=args.meta_learning_rate
        )
        self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer=self.optimizer,
            T_max=self.args.total_epochs * self.args.total_iter_per_epoch,
            eta_min=self.args.min_learning_rate,
        )
Example No. 6
def get_optimizer(
    hparams,
    models,
    lr=None,
):
    if lr is None:
        lr = hparams.lr
    eps = 1e-8
    parameters = []
    for model in models:
        parameters += list(model.parameters())
    if hparams.optimizer == 'sgd':
        optimizer = SGD(parameters,
                        lr=lr,
                        momentum=hparams.momentum,
                        weight_decay=hparams.weight_decay)
    elif hparams.optimizer == 'adam':
        optimizer = Adam(parameters,
                         lr=lr,
                         eps=eps,
                         weight_decay=hparams.weight_decay)
    elif hparams.optimizer == 'radam':
        optimizer = RAdam(parameters,
                          lr=lr,
                          eps=eps,
                          weight_decay=hparams.weight_decay)
    elif hparams.optimizer == 'ranger':
        optimizer = Ranger(parameters,
                           lr=lr,
                           eps=eps,
                           weight_decay=hparams.weight_decay)
    else:
        raise ValueError('optimizer not recognized!')

    return optimizer
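A hypothetical usage sketch of get_optimizer; the hparams namespace below is a stand-in for the project's real configuration object, and the models are toy modules:

from argparse import Namespace
import torch.nn as nn

hparams = Namespace(optimizer='adam', lr=1e-3, momentum=0.9, weight_decay=1e-4)
models = [nn.Linear(8, 8), nn.Linear(8, 2)]
optimizer = get_optimizer(hparams, models)              # Adam over both models' parameters
finetune_opt = get_optimizer(hparams, models, lr=1e-5)  # an explicit lr overrides hparams.lr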
Example No. 7
 def configure_optimizers(self):
     """Optimizer configuration.
     
     Returns:
         object: Optimizer.
     """
     optimizer = Ranger(self.parameters(),
                        lr=self.hparams['lr'],
                        weight_decay=1e-5)
     return optimizer
Example No. 8
    def __init__(self,
                 gen,
                 dis,
                 dataloader_train,
                 dataloader_val,
                 gpu_id,
                 log_freq,
                 save_dir,
                 n_step,
                 optimizer='adam'):
        if torch.cuda.is_available():
            self.device = torch.device('cuda:' + str(gpu_id))
        else:
            self.device = torch.device('cpu')
        self.gen = gen.to(self.device)
        self.dis = dis.to(self.device)

        self.dataloader_train = dataloader_train
        self.dataloader_val = dataloader_val

        if optimizer == 'adam':
            self.optim_g = torch.optim.Adam(gen.parameters(),
                                            lr=1e-4,
                                            betas=(0.5, 0.999))
            self.optim_d = torch.optim.Adam(dis.parameters(),
                                            lr=1e-4,
                                            betas=(0.5, 0.999))
        elif optimizer == 'ranger':
            self.optim_g = Ranger(gen.parameters())
            self.optim_d = Ranger(dis.parameters())

        self.criterionL1 = nn.L1Loss()
        self.criterionVGG = VGGLoss()
        self.criterionAdv = torch.nn.BCELoss()
        self.log_freq = log_freq
        self.save_dir = save_dir
        self.n_step = n_step
        self.step = 0
        print('Generator Parameters:',
              sum([p.nelement() for p in self.gen.parameters()]))
        print('Discriminator Parameters:',
              sum([p.nelement() for p in self.dis.parameters()]))
Example No. 9
	def parse_enemy_type(self, line):
		content = line.strip()
		content = content.lower()

		if content == "brute":
			return Brute()
		elif content == "runner":
			return Runner()
		elif content == "ranger":
			return Ranger()
		else:
			raise CorruptedSaveError("Save file has unknown enemy types")
Example No. 10
    def __init__(self, autoencoder: LSTMAutoEncoder, train_loader: DataLoader,
                 val_loader: DataLoader, cfg: Config):
        self.net = autoencoder
        self.train_loader = train_loader
        self.val_loader = val_loader

        # get from config object
        output_dir = os.path.join(cfg.OUTPUT_DIR, cfg.EXPERIMENT_NAME)
        os.makedirs(output_dir, exist_ok=True)
        self.output_dir = output_dir
        self.device = cfg.DEVICE
        self.epoch_n = cfg.EPOCH_N
        self.save_cycle = cfg.SAVE_CYCLE
        self.verbose_cycle = cfg.VERBOSE_CYCLE
        self.encoder_lr = cfg.ENCODER_LR
        self.decoder_lr = cfg.DECODER_LR
        self.encoder_gamma = cfg.ENCODER_GAMMA
        self.decoder_gamma = cfg.DECODER_GAMMA
        self.encoder_step_cycle = cfg.ENCODER_STEP_CYCLE
        self.decoder_step_cycle = cfg.DECODER_STEP_CYCLE

        # set optimizer and scheduler
        self.encoder_optim = Ranger(params=filter(lambda p: p.requires_grad,
                                                  self.encoder.parameters()),
                                    lr=self.encoder_lr)
        self.decoder_optim = Ranger(params=filter(lambda p: p.requires_grad,
                                                  self.decoder.parameters()),
                                    lr=self.decoder_lr)
        self.encoder_stepper = StepLR(self.encoder_optim,
                                      step_size=self.encoder_step_cycle,
                                      gamma=self.encoder_gamma)
        self.decoder_stepper = StepLR(self.decoder_optim,
                                      step_size=self.decoder_step_cycle,
                                      gamma=self.decoder_gamma)
        self.loss = nn.MSELoss()

        # for book-keeping
        self.crt_epoch = 0
        self.train_losses = []
        self.val_losses = []
Example No. 11
def getCharacter():
    print("Warrior     Mage     Ranger")
    character = None
    answer = raw_input("Answer:")
    if answer.lower() in ['w','warrior']:
        character = Warrior()
    if answer.lower() in ['m','mage']:
        character = Mage() #Character("mage")
    if answer.lower() in ['r','ranger']:
        character = Ranger() #Character("ranger")
    if answer.lower() in ['quit','q']:
        sys.exit()
    return character
Example No. 12
    def _initialize_sprites(self, level_map):
        """A method which initializes the level, creates all objects necessary for the game to run

        Args:
            level_map: the layout of the level in a two dimensional list [y][x]
        """
        height = len(level_map)
        width = len(level_map[0])

        for pos_y in range(height):
            for pos_x in range(width):
                cell = level_map[pos_y][pos_x]
                normalized_x = pos_x * self.cell_size
                normalized_y = pos_y * self.cell_size

                if cell == 0:
                    self.floors.add(Floor(normalized_x, normalized_y))
                elif cell == 1:
                    self.walls.add(Wall(normalized_x, normalized_y))
                elif cell == 2:
                    self.player = Player(pos_x, pos_y, normalized_x,
                                         normalized_y)
                    self.floors.add(Floor(normalized_x, normalized_y))
                    self.level_map[pos_y][pos_x] = self.player
                elif cell == 3:
                    self.stairs = Stairs(normalized_x, normalized_y)
                elif cell == 4:
                    enemy = Slime(pos_x, pos_y, normalized_x, normalized_y)
                    self.enemies.add(enemy)
                    self.floors.add(Floor(normalized_x, normalized_y))
                    self.level_map[pos_y][pos_x] = enemy
                elif cell == 5:
                    enemy = Ranger(pos_x, pos_y, normalized_x, normalized_y)
                    self.enemies.add(enemy)
                    self.floors.add(Floor(normalized_x, normalized_y))
                    self.level_map[pos_y][pos_x] = enemy
                elif cell == 6:
                    item = Item(pos_x, pos_y, normalized_x, normalized_y)
                    self.floors.add(Floor(normalized_x, normalized_y))
                    self.objects.add(item)
                    self.level_map[pos_y][pos_x] = item

        self.all_sprites.add(self.walls, self.floors, self.stairs,
                             self.objects, self.enemies, self.player)

        self.refresh_enemy_queue()
        self.setup_camera()
Example No. 13
def init_optim(optim, params, lr, weight_decay, momentum):
    if optim == 'adam':
        return torch.optim.Adam(params, lr=lr, weight_decay=weight_decay)
    elif optim == 'sgd':
        return torch.optim.SGD(params,
                               lr=lr,
                               momentum=momentum,
                               weight_decay=weight_decay)
    elif optim == 'rmsprop':
        return torch.optim.RMSprop(params,
                                   lr=lr,
                                   momentum=momentum,
                                   weight_decay=weight_decay)
    elif optim == 'ranger':
        return Ranger(params, lr=lr, weight_decay=weight_decay)
    else:
        raise KeyError("Unsupported optim: {}".format(optim))
Example No. 14
    def configure_optimizers(self):
        optimizer = Ranger(
            self.model.parameters(),
            lr=self.hparams["learning_rate"],
            alpha=0.5,
            k=6,
            N_sma_threshhold=5,
            weight_decay=self.hparams["weight_decay"],
        )
        scheduler = lr_scheduler.ReduceLROnPlateau(
            optimizer,
            "min",
            factor=0.5,
            verbose=True,
            patience=self.hparams["scheduler_pat"],
        )

        return [optimizer], [{"scheduler": scheduler}]
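Note: recent PyTorch Lightning releases require a "monitor" key when configure_optimizers returns a ReduceLROnPlateau scheduler. A simplified, hedged variant of the method above (assuming the module logs a "val_loss" metric) could look like:

    def configure_optimizers(self):
        optimizer = Ranger(self.model.parameters(), lr=self.hparams["learning_rate"],
                           weight_decay=self.hparams["weight_decay"])
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, "min", factor=0.5,
                                                   patience=self.hparams["scheduler_pat"])
        # "monitor" tells Lightning which logged metric drives the plateau scheduler
        return [optimizer], [{"scheduler": scheduler, "monitor": "val_loss"}]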
Example No. 15
def train(train_iter, test_iter, net, feature_params, loss, device, num_epochs,
          file_name):
    net = net.to(device)
    print("training on ", device)
    batch_count = 0
    best_test_acc = 0
    lr = 0.001
    optimizer = Ranger([{
        'params': feature_params
    }, {
        'params': net.fc.parameters(),
        'lr': lr * 10
    }],
                       lr=lr,
                       weight_decay=0.0001)
    # optimizer = optim.SGD([{'params': feature_params},
    #                        {'params': net.fc.parameters(), 'lr': lr * 10}],
    #                       lr=lr, weight_decay=0.001)
    # scheduler = torch.optim.lr_scheduler.CosineAnnealingLr(optimizer, T_max=5, eta_min=4e-08)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=10,
                                                gamma=0.1)
    for epoch in range(1, num_epochs + 1):
        train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
        scheduler.step()
        for X, y in train_iter:
            X = X.to(device)
            y = y.to(device)
            y_hat = net(X)
            l = loss(y_hat, y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_l_sum += l.cpu().item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()
            n += y.shape[0]
            batch_count += 1
        test_acc = evaluate_accuracy(test_iter, net)
        print(
            'epoch %d, loss %.5f, train_acc %.5f, val_acc %.5f, time %.1f sec'
            % (epoch, train_l_sum / batch_count, train_acc_sum / n, test_acc,
               time.time() - start))
        if test_acc > best_test_acc:
            print('find best! save at model/%s/best.pth' % file_name)
            best_test_acc = test_acc
            torch.save(net.state_dict(), './model/%s/best.pth' % file_name)
            with open('./result/%s.txt' % file_name, 'a') as acc_file:
                acc_file.write('Epoch: %2d, acc: %.8f\n' % (epoch, test_acc))
        if epoch % 10 == 0:
            torch.save(net.state_dict(),
                       './model/%s/checkpoint_%d.pth' % (file_name, epoch))
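evaluate_accuracy is called in the loop above but not shown; a minimal sketch consistent with its call site (an assumption, not the original helper) could be:

import torch

def evaluate_accuracy(data_iter, net, device=None):
    # Hypothetical helper: top-1 accuracy of `net` over a dataloader.
    if device is None:
        device = next(net.parameters()).device
    net.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for X, y in data_iter:
            X, y = X.to(device), y.to(device)
            correct += (net(X).argmax(dim=1) == y).sum().item()
            total += y.shape[0]
    net.train()
    return correct / total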
Example No. 16
    def configure_optimizers(self):
        # With this thing we get only params, which requires grad (weights needed to train)
        params = filter(lambda p: p.requires_grad, self.model.parameters())
        if self.hparams.optimizer == "Ranger":
            self.optimizer = Ranger(params, self.hparams.lr, weight_decay=self.hparams.wd)
        elif self.hparams.optimizer == "SGD":
            self.optimizer = torch.optim.SGD(params, self.hparams.lr, momentum=self.hparams.momentum, weight_decay=self.hparams.wd)
        elif self.hparams.optimizer == "LARS":
            self.optimizer = LARS(params, lr=self.hparams.lr, momentum=self.hparams.momentum, weight_decay=self.hparams.wd, max_epoch=self.hparams.epochs)
        elif self.hparams.optimizer == "RAdam":
            self.optimizer = RAdam(params, lr=self.hparams.lr, weight_decay=self.hparams.wd)

        if self.hparams.scheduler == "Cosine Warm-up":
            self.scheduler = torch.optim.lr_scheduler.OneCycleLR(self.optimizer, self.hparams.lr, epochs=self.hparams.epochs, steps_per_epoch=1, pct_start=self.hparams.pct_start)
        if self.hparams.scheduler == "Cosine Delayed":
            self.scheduler = DelayedCosineAnnealingLR(self.optimizer, self.hparams.flat_epochs, self.hparams.cosine_epochs)

        
        sched_dict = {'scheduler': self.scheduler}


        return [self.optimizer], [sched_dict]
Example No. 17
    def take_turn(self, offensive_units):  #[Unit] => void
        all_enemies = self.gc.sense_nearby_units_by_team(
            bc.MapLocation(self.gc.planet(), 0, 0), 1000000,
            Helper.get_opposing_team(self.gc.team()))

        if len(all_enemies) > 0:
            self.rally_point = all_enemies[0].location.map_location()
            self.on_alert = True
        else:
            self.rally_point = self.get_default_rally_point()
            self.on_alert = False

        for unit in offensive_units:
            soldier = self.unit_info.get(unit.id)
            if soldier is None:
                if unit.unit_type == bc.UnitType.Knight:
                    soldier = Knight(self.gc, self.intel_map, self.mov,
                                     self.astro, Role.Attack, unit)
                elif unit.unit_type == bc.UnitType.Ranger:
                    soldier = Ranger(self.gc, self.intel_map, self.mov,
                                     self.astro, Role.Attack, unit)
                elif unit.unit_type == bc.UnitType.Mage:
                    soldier = Mage(self.gc, self.intel_map, self.mov,
                                   self.astro, Role.Attack, unit)
                elif unit.unit_type == bc.UnitType.Healer:
                    soldier = Healer(self.gc, self.intel_map, self.mov,
                                     self.astro, Role.Attack, unit, self)
                else:
                    soldier = Soldier(self.gc, self.intel_map, self.mov,
                                      self.astro, Role.Scout, unit)
            soldier.set_rally_point(self.rally_point)
            self.unit_info[unit.id] = soldier
            if self.gc.planet() == bc.Planet.Mars:
                soldier.set_role(Role.Scout)
            soldier.set_rally_point(self.rally_point)
            soldier.take_turn(unit, self.on_alert)
            #Carry out soldier move
            soldier.move_and_attack()
Example No. 18
 def take_extra_turn(self, unit):  # Unit => void
     if (self.gc.can_sense_unit(unit.id)):
         unit = self.gc.unit(unit.id)
     else:
         print("Commander: Can't sense soldier !!")
     #print("Commander: Overcharged unit attack heat is: " + str(unit.attack_heat()));
     soldier = self.unit_info.get(unit.id)
     if soldier is None:
         if unit.unit_type == bc.UnitType.Knight:
             soldier = Knight(self.gc, self.intel_map, self.mov, self.astro,
                              Role.Attack, unit)
         elif unit.unit_type == bc.UnitType.Ranger:
             soldier = Ranger(self.gc, self.intel_map, self.mov, self.astro,
                              Role.Attack, unit)
         elif unit.unit_type == bc.UnitType.Mage:
             soldier = Mage(self.gc, self.intel_map, self.mov, self.astro,
                            Role.Attack, unit)
         elif unit.unit_type == bc.UnitType.Healer:
             soldier = Healer(self.gc, self.intel_map, self.mov, self.astro,
                              Role.Attack, unit, self)
     if not (soldier is None):
         soldier.take_turn(unit, self.on_alert)
         soldier.move_and_attack()
Example No. 19
                       lr=args.lr,
                       betas=(.9, .999),
                       weight_decay=args.weight_decay)
    lr_decay_gamma = 0.3
elif args.optimizer == 'lars':
    from torchlars import LARS
    base_optimizer = optim.SGD(model.parameters(),
                               lr=args.lr,
                               momentum=0.9,
                               weight_decay=args.weight_decay)
    optim = LARS(base_optimizer, eps=1e-8, trust_coef=0.001)
    lr_decay_gamma = 0.1
elif args.optimizer == 'ranger':
    from ranger import Ranger
    optim = Ranger(model.parameters(),
                   weight_decay=args.weight_decay,
                   lr=args.lr)
else:
    raise NotImplementedError()

# normal_class=[args.known_normal], known_outlier_class=args.known_outlier,
print("known_normal:", args.known_normal, "known_outlier:", args.known_outlier)
# if args.lr_scheduler == 'cosine':
# scheduler = lr_scheduler.CosineAnnealingLR(optim, args.n_epochs)
# print("use cosine scheduler")
# Evaluation before training
rotation = args.shift_trans
# m_in,m_out = test(model, test_loader, train_loader, -1,return_minout=True)
# escore = test(model, test_loader, train_loader, -1)
# raise
criterion = nn.CrossEntropyLoss()
Example No. 20
    robot.playsound(SOUNDS["toy_out"])
    robot.lightpattern(PATTERNS["toy_out"])

class runner:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, robot):
        global current_action

        if current_action:
            current_action.cancel()

        current_action = self.fn(robot)

with Ranger() as robot:

    # Turn on DEBUG logging
    robot.debug()

    robot.state["asleep"] = True

    robot.show_battery()
    blinking = robot.background_blink()

    logger.info("Ok! Let's start!")
    logger.info("Waiting for the lolette to be removed...")
    robot.whenever("lolette", becomes = True).do(runner(on_lolette))
    robot.whenever("lolette", becomes = False).do(runner(on_lolette_removed))
    robot.whenever("scale", increase = 0.1, max_firing_freq = 0.3).do(on_toy_added)
    robot.whenever("scale", decrease = 0.1, max_firing_freq = 0.3).do(on_toy_removed)
Example No. 21
def train_fold():
    #get arguments
    opts=get_args()

    #gpu selection
    os.environ["CUDA_VISIBLE_DEVICES"] = opts.gpu_id
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    #instantiate datasets
    json_path=os.path.join(opts.path,'train.json')

    json=pd.read_json(json_path,lines=True)
    json=json[json.signal_to_noise > opts.noise_filter]
    ids=np.asarray(json.id.to_list())


    error_weights=get_errors(json)
    error_weights=opts.error_alpha+np.exp(-error_weights*opts.error_beta)
    train_indices,val_indices=get_train_val_indices(json,opts.fold,SEED=2020,nfolds=opts.nfolds)

    _,labels=get_data(json)
    sequences=np.asarray(json.sequence)
    train_seqs=sequences[train_indices]
    val_seqs=sequences[val_indices]
    train_labels=labels[train_indices]
    val_labels=labels[val_indices]
    train_ids=ids[train_indices]
    val_ids=ids[val_indices]
    train_ew=error_weights[train_indices]
    val_ew=error_weights[val_indices]

    #train_inputs=np.stack([train_inputs],0)
    #val_inputs=np.stack([val_inputs,val_inputs2],0)
    dataset=RNADataset(train_seqs,train_labels,train_ids, train_ew, opts.path)
    val_dataset=RNADataset(val_seqs,val_labels, val_ids, val_ew, opts.path, training=False)
    dataloader = DataLoader(dataset, batch_size=opts.batch_size,
                            shuffle=True, num_workers=opts.workers)
    val_dataloader = DataLoader(val_dataset, batch_size=opts.batch_size*2,
                            shuffle=False, num_workers=opts.workers)

    # print(dataset.data.shape)
    # print(dataset.bpps[0].shape)
    # exit()
    #checkpointing
    checkpoints_folder='checkpoints_fold{}'.format((opts.fold))
    csv_file='log_fold{}.csv'.format((opts.fold))
    columns=['epoch','train_loss',
             'val_loss']
    logger=CSVLogger(columns,csv_file)

    #build model and logger
    model=NucleicTransformer(opts.ntoken, opts.nclass, opts.ninp, opts.nhead, opts.nhid,
                           opts.nlayers, opts.kmer_aggregation, kmers=opts.kmers,stride=opts.stride,
                           dropout=opts.dropout).to(device)
    optimizer=Ranger(model.parameters(), weight_decay=opts.weight_decay)
    criterion=weighted_MCRMSE
    #lr_schedule=lr_AIAYN(optimizer,opts.ninp,opts.warmup_steps,opts.lr_scale)

    # Mixed precision initialization
    opt_level = 'O1'
    #model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level)
    model = nn.DataParallel(model)
    pretrained_df=pd.read_csv('pretrain.csv')
    #print(pretrained_df.epoch[-1])
    model.load_state_dict(torch.load('pretrain_weights/epoch{}.ckpt'.format(int(pretrained_df.iloc[-1].epoch))))

    pytorch_total_params = sum(p.numel() for p in model.parameters())
    print('Total number of parameters: {}'.format(pytorch_total_params))


    #distance_mask=get_distance_mask(107)
    #distance_mask=torch.tensor(distance_mask).float().to(device).reshape(1,107,107)
    #print("Starting training for fold {}/{}".format(opts.fold,opts.nfolds))
    #training loop
    cos_epoch=int(opts.epochs*0.75)-1
    lr_schedule=torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,(opts.epochs-cos_epoch)*len(dataloader))
    for epoch in range(opts.epochs):
        model.train(True)
        t=time.time()
        total_loss=0
        optimizer.zero_grad()
        train_preds=[]
        ground_truths=[]
        step=0
        for data in dataloader:
        #for step in range(1):
            step+=1
            #lr=lr_schedule.step()
            lr=get_lr(optimizer)
            #print(lr)
            src=data['data'].to(device)
            labels=data['labels']
            bpps=data['bpp'].to(device)
            #print(bpps.shape[1])
            # bpp_selection=np.random.randint(bpps.shape[1])
            # bpps=bpps[:,bpp_selection]
            # src=src[:,bpp_selection]

            # print(bpps.shape)
            # print(src.shape)
            # exit()

            # print(bpps.shape)
            # exit()
            #src=mutate_rna_input(src,opts.nmute)
            #src=src.long()[:,np.random.randint(2)]
            labels=labels.to(device)#.float()
            output=model(src,bpps)
            ew=data['ew'].to(device)
            #print(output.shape)
            #print(labels.shape)
            loss=criterion(output[:,:68],labels,ew).mean()

            # with amp.scale_loss(loss, optimizer) as scaled_loss:
            #    scaled_loss.backward()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
            optimizer.step()
            optimizer.zero_grad()
            total_loss+=loss
            print ("Epoch [{}/{}], Step [{}/{}] Loss: {:.3f} Lr:{:.6f} Time: {:.1f}"
                           .format(epoch+1, opts.epochs, step+1, len(dataloader), total_loss/(step+1) , lr,time.time()-t),end='\r',flush=True) #total_loss/(step+1)
            #break
            if epoch > cos_epoch:
                lr_schedule.step()
        print('')
        train_loss=total_loss/(step+1)
        #recon_acc=np.sum(recon_preds==true_seqs)/len(recon_preds)
        torch.cuda.empty_cache()
        if (epoch+1)%opts.val_freq==0 and epoch > cos_epoch:
        #if (epoch+1)%opts.val_freq==0:
            val_loss=validate(model,device,val_dataloader,batch_size=opts.batch_size)
            to_log=[epoch+1,train_loss,val_loss,]
            logger.log(to_log)


        if (epoch+1)%opts.save_freq==0:
            save_weights(model,optimizer,epoch,checkpoints_folder)

        # if epoch == cos_epoch:
        #     print('yes')


    get_best_weights_from_fold(opts.fold)
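get_lr is read inside the training loop above but not defined in the snippet; a minimal sketch of what it presumably does (an assumption based on its usage) is:

def get_lr(optimizer):
    # Hypothetical helper: report the learning rate of the first parameter group.
    for param_group in optimizer.param_groups:
        return param_group['lr']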
Example No. 22
        model = GCVAE(
            args,
            n_r,
            n_e,
            dataset,
        ).to(device)
    elif model_name == 'GCVAE2':
        model = GCVAE2(args, n_r, n_e, dataset).to(device)
    elif model_name == 'GVAE':
        model = GVAE(args, n_r, n_e, dataset).to(device)
    else:
        raise ValueError('{} not defined!'.format(model_name))

    optimizer = Ranger(model.parameters(),
                       lr=args['lr'],
                       k=args['k'] if 'k' in args else 9,
                       betas=(.95, 0.999),
                       use_gc=True,
                       gc_conv_only=False)
    wandb.watch(model)

    # Load model
    if args['load_model']:
        # model.load_state_dict(torch.load(model_path, map_location=torch.device(device))['model_state_dict'])
        model.load_state_dict(
            torch.load(args['load_model_path'],
                       map_location=torch.device(device))['model_state_dict'])
        print('Saved model loaded.')

    # Train model
    if args['train']:
        train_eval_vae(batch_size, args['epochs'], train_set[:limit],
Example No. 23
def train(args):
    # get configs
    epochs = args.epoch
    dim = args.dim
    lr = args.lr
    weight_decay = args.l2
    head_num = args.head_num
    device = args.device
    act = args.act
    fusion = args.fusion
    beta = args.beta
    alpha = args.alpha
    use_self = args.use_self
    agg = args.agg
    model = DATE(leaf_num,importer_size,item_size,\
                                    dim,head_num,\
                                    fusion_type=fusion,act=act,device=device,\
                                    use_self=use_self,agg_type=agg,
                                    ).to(device)
    model = nn.DataParallel(model,device_ids=[0,1])

    # initialize parameters
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)

    # optimizer & loss 
    optimizer = Ranger(model.parameters(), weight_decay=weight_decay,lr=lr)
    cls_loss_func = nn.BCELoss()
    reg_loss_func = nn.MSELoss()

    # save best model
    global_best_score = 0
    model_state = None

    # early stop settings 
    stop_rounds = 3
    no_improvement = 0
    current_score = None 

    for epoch in range(epochs):
        for step, (batch_feature,batch_user,batch_item,batch_cls,batch_reg) in enumerate(train_loader):
            model.train() # prep to train model
            batch_feature,batch_user,batch_item,batch_cls,batch_reg =  \
            batch_feature.to(device), batch_user.to(device), batch_item.to(device),\
             batch_cls.to(device), batch_reg.to(device)
            batch_cls,batch_reg = batch_cls.view(-1,1), batch_reg.view(-1,1)

            # model output
            classification_output, regression_output, hidden_vector = model(batch_feature,batch_user,batch_item)

            # FGSM attack
            adv_vector = fgsm_attack(model,cls_loss_func,hidden_vector,batch_cls,0.01)
            adv_output = model.module.pred_from_hidden(adv_vector) 

            # calculate loss
            adv_loss_func = nn.BCELoss(weight=batch_cls) 
            adv_loss = beta * adv_loss_func(adv_output,batch_cls) 
            cls_loss = cls_loss_func(classification_output,batch_cls)
            revenue_loss = alpha * reg_loss_func(regression_output, batch_reg)
            loss = cls_loss + revenue_loss + adv_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (step+1) % 1000 ==0:  
                print("CLS loss:%.4f, REG loss:%.4f, ADV loss:%.4f, Loss:%.4f"\
                %(cls_loss.item(),revenue_loss.item(),adv_loss.item(),loss.item()))
                
        # evaluate 
        model.eval()
        print("Validate at epoch %s"%(epoch+1))
        y_prob, val_loss = model.module.eval_on_batch(valid_loader)
        y_pred_tensor = torch.tensor(y_prob).float().to(device)
        best_threshold, val_score, roc = torch_threshold(y_prob,xgb_validy)
        overall_f1, auc, precisions, recalls, f1s, revenues = metrics(y_prob,xgb_validy,revenue_valid)
        select_best = np.mean(f1s)
        print("Over-all F1:%.4f, AUC:%.4f, F1-top:%.4f" % (overall_f1, auc, select_best) )

        print("Evaluate at epoch %s"%(epoch+1))
        y_prob, val_loss = model.module.eval_on_batch(test_loader)
        y_pred_tensor = torch.tensor(y_prob).float().to(device)
        overall_f1, auc, precisions, recalls, f1s, revenues = metrics(y_prob,xgb_testy,revenue_test,best_thresh=best_threshold)
        print("Over-all F1:%.4f, AUC:%.4f, F1-top:%.4f" %(overall_f1, auc, np.mean(f1s)) )

        # save best model 
        if select_best > global_best_score:
            global_best_score = select_best
            torch.save(model,model_path)
        
         # early stopping 
        if current_score == None:
            current_score = select_best
            continue
        if select_best < current_score:
            current_score = select_best
            no_improvement += 1
        if no_improvement >= stop_rounds:
            print("Early stopping...")
            break 
        if select_best > current_score:
            no_improvement = 0
            current_score = None
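fgsm_attack is called above but not shown; a rough FGSM-style sketch that matches the call site (an assumption, not the original implementation) might perturb the hidden vector along the sign of its gradient:

def fgsm_attack(model, loss_func, hidden, labels, epsilon):
    # Hypothetical helper: one-step FGSM perturbation of the hidden representation.
    hidden = hidden.detach().requires_grad_(True)
    output = model.module.pred_from_hidden(hidden)
    loss = loss_func(output, labels)
    model.zero_grad()
    loss.backward()
    return (hidden + epsilon * hidden.grad.sign()).detach()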
Example No. 24
    def init_model(
        self,
        dropout,
        embed_dim=400,
        vector_path=None,
        word_hidden=None,
        sent_hidden=None,
        word_encoder="gru",
        sent_encoder="gru",
        dim_caps=16,
        num_caps=25,
        num_compressed_caps=100,
        dropout_caps=0.2,
        lambda_reg_caps=0.0005,
        pos_weight=None,
        nhead_doc=5,
        ulmfit_pretrained_path=None,
        dropout_factor_ulmfit=1.0,
        binary_class=True,
        KDE_epsilon=0.05,
        num_cycles_lr=5,
        lr_div_factor=10,
    ):

        self.embed_size = embed_dim
        self.word_hidden = word_hidden
        self.sent_hidden = sent_hidden
        self.dropout = dropout
        self.word_encoder = word_encoder
        self.sent_encoder = sent_encoder
        self.ulmfit_pretrained_path = ulmfit_pretrained_path
        self.binary_class = binary_class
        self.num_cycles_lr = num_cycles_lr
        self.lr_div_factor = lr_div_factor

        # Initialize model and load pretrained weights if given
        self.logger.info("Building model...")
        if self.model_name.lower() == "han":
            self.model = HAN(
                self.vocab_size,
                embed_dim,
                word_hidden,
                sent_hidden,
                self.num_labels,
                dropout=dropout,
                word_encoder=word_encoder,
                sent_encoder=sent_encoder,
                ulmfit_pretrained_path=ulmfit_pretrained_path,
                dropout_factor_ulmfit=dropout_factor_ulmfit,
            )  # TODO: also adapt for other models
        elif self.model_name.lower() == "hgrulwan":
            self.model = HGRULWAN(
                self.vocab_size,
                embed_dim,
                word_hidden,
                sent_hidden,
                self.num_labels,
                dropout=dropout,
                word_encoder=word_encoder,
                ulmfit_pretrained_path=ulmfit_pretrained_path,
                dropout_factor_ulmfit=dropout_factor_ulmfit,
            )
        elif self.model_name.lower() == "hcapsnet":
            self.model = HCapsNet(
                self.vocab_size,
                embed_dim,
                word_hidden,
                sent_hidden,
                self.num_labels,
                dropout=dropout,
                word_encoder=word_encoder,
                sent_encoder=sent_encoder,
                dropout_caps=dropout_caps,
                dim_caps=dim_caps,
                num_caps=num_caps,
                num_compressed_caps=num_compressed_caps,
                ulmfit_pretrained_path=ulmfit_pretrained_path,
                dropout_factor_ulmfit=dropout_factor_ulmfit,
                lambda_reg_caps=lambda_reg_caps,
                binary_class=binary_class,
                KDE_epsilon=KDE_epsilon,
            )
        elif self.model_name.lower() == "HierarchicalAttentionCapsNet".lower():
            self.model = HierarchicalAttentionCapsNet(
                num_tokens=self.vocab_size,
                embed_size=embed_dim,
                word_hidden=word_hidden,
                num_classes=self.num_labels,
                dropout=dropout,
                word_encoder=word_encoder,
                sent_encoder=sent_encoder,
                dropout_caps=dropout_caps,
                dim_caps=dim_caps,
                num_caps=num_caps,
                num_compressed_caps=num_compressed_caps,
                nhead_doc=nhead_doc,
                ulmfit_pretrained_path=ulmfit_pretrained_path,
                dropout_factor_ulmfit=dropout_factor_ulmfit,
                lambda_reg_caps=lambda_reg_caps,
                binary_class=binary_class,
                KDE_epsilon=KDE_epsilon,
            )

        if binary_class:
            # Initialize training attributes
            if "caps" in self.model_name.lower():
                # self.criterion = FocalLoss()
                self.criterion = MarginLoss(0.9, 0.1, 0.5, True)
                # self.criterion = torch.nn.BCELoss()
            else:
                if pos_weight:
                    pos_weight = torch.tensor(pos_weight).to(self.device)
                self.criterion = torch.nn.BCEWithLogitsLoss(
                    pos_weight=pos_weight, reduction="mean"
                )
        else:
            if "caps" in self.model_name.lower():
                self.criterion = MarginLoss(0.9, 0.1, 0.5, False)
                # torch.nn.CrossEntropyLoss(reduction='mean')
            else:
                self.criterion = torch.nn.CrossEntropyLoss(reduction="mean")

        # Load embeddings
        if word_encoder.lower() != "ulmfit":
            # initialize optimizer
            params = self.model.parameters()
            # get word embeddings
            vectors = fasttext.load_model(vector_path)

            embed_table = get_embedding(vectors, self.word_to_idx, embed_dim)
            self.model.set_embedding(embed_table)
        else:
            # initialize per-layer lr for ULMFiT
            params = [
                {
                    "params": self.model.sent_encoder.word_encoder.parameters(),
                    "lr": self.lr / self.lr_div_factor,
                },
                {"params": self.model.caps_classifier.parameters()},
                {"params": self.model.doc_encoder.parameters()},
                {"params": self.model.sent_encoder.weight_W_word.parameters()},
                {"params": self.model.sent_encoder.weight_proj_word},
            ]

        self.optimizer = Ranger(params, lr=self.lr, betas=(0.95,0.99), eps=1e-6)
        self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            self.optimizer,
            T_max=self.steps_per_epoch * self.num_epochs
        )
        # self.optimizer = RAdam(params, lr=self.lr, weight_decay=self.weight_decay)
        # self.optimizer = AdamW(params, lr=self.lr, weight_decay=self.weight_decay)
        #
        # self.scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(
        #     self.optimizer,
        #     num_warmup_steps=self.steps_per_epoch,
        #     num_training_steps=self.steps_per_epoch * self.num_epochs,
        #     num_cycles=self.num_cycles_lr,
        # )

        if self.keep_ulmfit_frozen:  # Freeze ulmfit completely
            self.model.sent_encoder.word_encoder.freeze_to(-1)

        if torch.cuda.device_count() > 1:
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            self.model = MyDataParallel(self.model)
        self.model.to(self.device)
    """
    if args.resume:
        load_name = os.path.join("./model/ATCDNet_2020-03-02_20.pth")
        print("loading checkpoint %s" %(laod_name))
        net = torch.load(load_name)
        arg.start_epoch = net['epoch']
    """
    #if arg.pretrain:
    #    print("The ckp has been loaded sucessfully ")
    #net = torch.load("./model/MSAANet_2020-03-31_87.pth") # load the pretrained model
    #criterion = FocalLoss2d().to(device)
    criterion = torch.nn.BCELoss().to(device)
    #criterion = torch.nn.CrossEntropyLoss().to(device)
    train_loader, val_loader = get_dataset_loaders(5, batch_size)
    #opt = torch.optim.SGD(net.parameters(), lr=learning_rate)
    opt = Ranger(net.parameters(),lr=learning_rate)
    today=str(datetime.date.today())
    logger = get_log(model_name + today +'_log.txt')
    #scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=5,eta_min=4e-08)
    #scheduler = LR_Scheduler(args.lr_scheduler, args.lr,
    #                                        args.n_epoch, len(train_loader), logger=logger,
    #                                        lr_step=args.lr_step)
    #
    scheduler = PolynomialLRDecay(opt, max_decay_steps=100, end_learning_rate=0.0001, power=2.0)



    for epoch in range(num_epochs):
        logger.info("Epoch: {}/{}".format(epoch + 1, num_epochs))
        scheduler.step()
        #scheduler(opt,i,.step()
Example No. 26
 def configure_optimizers(self):
     opt = Ranger(self.parameters(), lr=self.lr)
     return opt
Example No. 27
    def __init__(self,
                 model,
                 optim,
                 loss,
                 lr,
                 bs,
                 name,
                 shape=512,
                 crop_type=0):
        self.num_workers = 4
        self.batch_size = {"train": bs, "val": 1}
        self.accumulation_steps = bs // self.batch_size['train']
        self.lr = lr
        self.loss = loss
        self.optim = optim
        self.num_epochs = 0
        self.best_dice = 0.
        self.best_lb_metric = 0.
        self.phases = ["train", "val"]
        self.device = torch.device("cuda:0")
        torch.set_default_tensor_type("torch.cuda.FloatTensor")
        self.net = model
        self.name = name
        self.do_cutmix = True

        if self.loss == 'BCE':
            self.criterion = torch.nn.BCEWithLogitsLoss()
        elif self.loss == 'BCE+DICE':
            self.criterion = BCEDiceLoss(threshold=None)  #MODIFIED
        elif self.loss == 'TVERSKY':
            self.criterion = Tversky()
        elif self.loss == 'Dice' or self.loss == 'DICE':
            self.criterion = DiceLoss()
        elif self.loss == 'BCE+DICE+JACCARD':
            self.criterion = BCEDiceJaccardLoss(threshold=None)
        else:
            raise (Exception(
                f'{self.loss} is not recognized. Please provide a valid loss function.'
            ))

        # Optimizers
        if self.optim == 'Over9000':
            self.optimizer = Over9000(self.net.parameters(), lr=self.lr)
        elif self.optim == 'Adam':
            self.optimizer = torch.optim.Adam(self.net.parameters(),
                                              lr=self.lr)
        elif self.optim == 'RAdam':
            self.optimizer = Radam(self.net.parameters(), lr=self.lr)
        elif self.optim == 'Ralamb':
            self.optimizer = Ralamb(self.net.parameters(), lr=self.lr)
        elif self.optim == 'Ranger':
            self.optimizer = Ranger(self.net.parameters(), lr=self.lr)
        elif self.optim == 'LookaheadAdam':
            self.optimizer = LookaheadAdam(self.net.parameters(), lr=self.lr)
        else:
            raise (Exception(
                f'{self.optim} is not recognized. Please provide a valid optimizer function.'
            ))

        self.scheduler = ReduceLROnPlateau(self.optimizer,
                                           factor=0.5,
                                           mode="min",
                                           patience=4,
                                           verbose=True,
                                           min_lr=1e-5)
        self.net = self.net.to(self.device)
        cudnn.benchmark = True

        self.dataloaders = {
            phase: provider(
                phase=phase,
                shape=shape,
                crop_type=crop_type,
                batch_size=self.batch_size[phase],
                num_workers=self.num_workers if phase == 'train' else 0,
            )
            for phase in self.phases
        }
        self.losses = {phase: [] for phase in self.phases}
        self.iou_scores = {phase: [] for phase in self.phases}
        self.dice_scores = {phase: [] for phase in self.phases}
        self.F2_scores = {phase: [] for phase in self.phases}
        self.lb_metric = {phase: [] for phase in self.phases}
Example No. 28
class Brain(object):
    """
    High-level model logic and tuning nuggets encapsulated.

    Based on efficientNet: https://arxiv.org/abs/1905.11946
    fine tuning the efficientnet for classification and object detection
    in this implementation, no weights are frozen
    ideally, batchnorm layers can be frozen for marginal training speed increase
    """

    def __init__(self, gradient_accum_steps=5, lr=0.0005, epochs=100, n_class=2, lmb=30):
        self.device = self.set_cuda_device()
        self.net = EFN_Classifier("tf_efficientnet_b1_ns", n_class).to(self.device)
        self.loss_function = nn.MSELoss()
        self.clf_loss_function = nn.CrossEntropyLoss()
        self.optimizer = Ranger(self.net.parameters(), lr=lr, weight_decay=0.999, betas=(0.9, 0.999))
        self.scheduler = CosineAnnealingLR(self.optimizer, epochs * 0.5, lr * 0.0001)
        self.scheduler.last_epoch = epochs
        self.scaler = GradScaler()
        self.epochs = epochs
        self.gradient_accum_steps = gradient_accum_steps
        self.lmb = lmb

    @staticmethod
    def set_cuda_device():
        if torch.cuda.is_available():
            device = torch.device("cuda:0")
            logging.info(f"Running on {torch.cuda.get_device_name()}")
        else:
            device = torch.device("cpu")
            logging.info("Running on a CPU")
        return device

    def run_training_loop(self, train_dataloader, valid_dataloader, model_filename):
        best_loss = float("inf")

        for epoch in range(self.epochs):
            if epoch != 0 and epoch > 0.5 * self.epochs:  # cosine anneal the last 50% of epochs
                self.scheduler.step()
            logging.info(f"Epoch {epoch+1}")

            logging.info("Training")
            train_losses, train_accuracies, train_miou = self.forward_pass(train_dataloader, train=True)

            logging.info("Validating")
            val_losses, val_accuracies, val_miou = self.forward_pass(valid_dataloader)

            logging.info(
                f"Training accuracy: {sum(train_accuracies)/len(train_accuracies):.2f}"
                f" | Training loss: {sum(train_losses)/len(train_losses):.2f}"
                f" | Training mIoU: {sum(train_miou)/len(train_miou):.2f}"
            )
            logging.info(
                f"Validation accuracy: {sum(val_accuracies)/len(val_accuracies):.2f}"
                f" | Validation loss: {sum(val_losses)/len(val_losses):.2f}"
                f" | Validation mIoU: {sum(val_miou)/len(val_miou):.2f}"
            )

            epoch_val_loss = sum(val_losses) / len(val_losses)

            if best_loss > epoch_val_loss:
                torch.save(self.net.state_dict(), model_filename)
                logging.info(f"Saving with loss of {epoch_val_loss}, improved over previous {best_loss}")
                best_loss = epoch_val_loss

    def bbox_iou(self, true_boxes, pred_boxes):
        iou_list = []
        for true_box, pred_box in zip(true_boxes, pred_boxes):

            x_left = max(true_box[0], pred_box[0]).item()
            y_top = max(true_box[1], pred_box[1]).item()

            x_right = min(true_box[2], pred_box[2]).item()
            y_bottom = min(true_box[3], pred_box[3]).item()

            if x_right < x_left or y_bottom < y_top:
                iou_list.append(0.0)
                continue

            overlap = (x_right - x_left) * (y_bottom - y_top)

            true_box_area = (true_box[2] - true_box[0]) * (true_box[3] - true_box[1])
            pred_box_area = (pred_box[2] - pred_box[0]) * (pred_box[3] - pred_box[1])
            iou = overlap / float(true_box_area + pred_box_area - overlap)
            iou_list.append(iou)

        iou = torch.tensor(iou_list)
        iou = torch.mean(iou)

        return iou

    def draw_boxes(self, images, bboxes, labels):
        label_dict = {0: "Cat", 1: "Dog"}

        for batch in zip(images, bboxes, labels):
            cv2.destroyAllWindows()
            image, bbox, label = batch[0].cpu().numpy(), batch[1].cpu().numpy(), torch.argmax(batch[2]).cpu().item()
            image = np.rollaxis(image, 0, 3)
            image = ((image - image.min()) * (1 / (image.max() - image.min()) * 255)).astype("uint8")
            image = cv2.UMat(image)

            cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), thickness=2)

            cv2.putText(
                image,
                f"{label_dict[label]}",
                (bbox[1], bbox[3]),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (0, 255, 0),
                1,
                cv2.LINE_AA,
            )
            cv2.imshow("test", image)
            cv2.waitKey(1)
            sleep(1)
            cv2.destroyAllWindows()

    def forward_pass(self, dataloader, draw=False, train=False):
        def get_loss(inputs, bbox_labels, clf_labels):
            label_outputs, bbox_outputs = self.net(inputs)
            bbox_loss = self.loss_function(bbox_outputs, bbox_labels)
            clf_loss = self.clf_loss_function(label_outputs, clf_labels)
            loss = torch.mean(bbox_loss + clf_loss * self.lmb)
            return loss, label_outputs, bbox_outputs

        if train:
            self.net.train()
        else:
            self.net.eval()

        losses = []
        accuracies = []
        miou = []

        for step, batch in enumerate(dataloader):
            inputs = batch[0].to(self.device).float()
            labels = batch[1].to(self.device).float()

            # splitting labels for separate loss calculation
            bbox_labels = labels[:, :4]
            clf_labels = labels[:, 4:].long()
            clf_labels = clf_labels[:, 0]

            with autocast():
                if train:
                    loss, label_outputs, bbox_outputs = get_loss(inputs, bbox_labels, clf_labels)
                    self.scaler.scale(loss).backward()
                else:
                    with torch.no_grad():
                        loss, label_outputs, bbox_outputs = get_loss(inputs, bbox_labels, clf_labels)
                    if draw:
                        self.draw_boxes(inputs, bbox_outputs, label_outputs)

            matches = [torch.argmax(i) == j for i, j in zip(label_outputs, clf_labels)]
            acc = matches.count(True) / len(matches)
            iou = self.bbox_iou(bbox_labels, bbox_outputs)

            miou.append(iou)
            losses.append(loss)
            accuracies.append(acc)

            if train and (step + 1) % self.gradient_accum_steps == 0:
                # gradient accumulation to train with bigger effective batch size
                # with less memory use
                # fp16 is used to speed up training and reduce memory consumption
                self.scaler.step(self.optimizer)
                self.scaler.update()
                self.optimizer.zero_grad()
                logging.info(
                    f"Step {step} of {len(dataloader)},\t"
                    f"Accuracy: {sum(accuracies)/len(accuracies):.2f},\t"
                    f"mIoU: {sum(miou)/len(miou):.2f},\t"
                    f"Loss: {sum(losses)/len(losses):.2f}"
                )

        return losses, accuracies, miou
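The class docstring notes that batchnorm layers could be frozen for a marginal speed-up; a hedged sketch of how that might be done (a hypothetical helper, not part of the original class) is:

import torch.nn as nn

def freeze_batchnorm(model: nn.Module) -> None:
    # Put every BatchNorm layer in eval mode and stop updating its affine parameters.
    for module in model.modules():
        if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
            module.eval()
            for param in module.parameters():
                param.requires_grad = False

Note that self.net.train() in forward_pass would flip the layers back to training mode, so the freeze would have to be re-applied (or train() overridden) after each call.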
Example No. 29
def main():

    SEED = 1234

    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    torch.backends.cudnn.deterministic = True

    TEXT = data.Field(lower=True, batch_first=True, tokenize='spacy')
    LABEL = data.LabelField(dtype=torch.float)

    train_data, test_data = datasets.IMDB.splits(TEXT,
                                                 LABEL,
                                                 root='/tmp/imdb/')
    train_data, valid_data = train_data.split(split_ratio=0.8,
                                              random_state=random.seed(SEED))

    TEXT.build_vocab(train_data,
                     vectors=GloVe(name='6B', dim=100, cache='/tmp/glove/'),
                     unk_init=torch.Tensor.normal_)

    LABEL.build_vocab(train_data)

    BATCH_SIZE = 64

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
        (train_data, valid_data, test_data),
        batch_size=BATCH_SIZE,
        device=device)
    vocab_size, embedding_dim = TEXT.vocab.vectors.shape

    class SentimentAnalysisCNN(nn.Module):
        def __init__(self,
                     vocab_size,
                     embedding_dim,
                     kernel_sizes,
                     num_filters,
                     num_classes,
                     d_prob,
                     mode,
                     use_drop=False):
            """
            Args:
                vocab_size : int - size of vocabulary in dictionary
                embedding_dim : int - the dimension of word embedding vector
                kernel_sizes : list of int - sequence of sizes of kernels in this architecture
                num_filters : int - number of filters used for each kernel size
                num_classes : int - number of classes to classify
                d_prob : float - dropout probability
                mode : one of:
                        static      : pretrained weights, non-trainable
                        nonstatic   : pretrained weights, trainable
                        rand        : random init weights
                use_drop : bool - whether to apply dropout before the final linear layer
            """
            super(SentimentAnalysisCNN, self).__init__()
            self.vocab_size = vocab_size
            self.embedding_dim = embedding_dim
            self.kernel_sizes = kernel_sizes
            self.num_filters = num_filters
            self.num_classes = num_classes
            self.d_prob = d_prob
            self.mode = mode
            self.embedding = nn.Embedding(vocab_size,
                                          embedding_dim,
                                          padding_idx=1)
            self.load_embeddings()
            self.conv = nn.ModuleList([
                nn.Sequential(
                    nn.Conv1d(in_channels=embedding_dim,
                              out_channels=num_filters,
                              kernel_size=k,
                              stride=1), nn.Dropout(p=0.5, inplace=True))
                for k in kernel_sizes
            ])
            self.use_drop = use_drop
            if self.use_drop:
                self.dropout = nn.Dropout(d_prob)
            self.fc = nn.Linear(len(kernel_sizes) * num_filters, num_classes)

        def forward(self, x):
            batch_size, sequence_length = x.shape
            # (batch, seq_len) -> (batch, embedding_dim, seq_len) for Conv1d
            x = self.embedding(x).transpose(1, 2)
            x = [F.relu(conv(x)) for conv in self.conv]
            # max-over-time pooling for each kernel size, then concatenate
            x = [F.max_pool1d(c, c.size(-1)).squeeze(dim=-1) for c in x]
            x = torch.cat(x, dim=1)
            if self.use_drop:
                x = self.dropout(x)
            x = self.fc(x)
            return torch.sigmoid(x).squeeze()

        def load_embeddings(self):
            if 'static' in self.mode:
                self.embedding.weight.data.copy_(TEXT.vocab.vectors)
                if 'non' not in self.mode:
                    # freeze the embedding layer; setting requires_grad on .data has no effect
                    self.embedding.weight.requires_grad = False
                    print(
                        'Loaded pretrained embeddings, weights are not trainable.'
                    )
                else:
                    self.embedding.weight.requires_grad = True
                    print(
                        'Loaded pretrained embeddings, weights are trainable.')
            elif self.mode == 'rand':
                print('Randomly initialized embeddings are used.')
            else:
                raise ValueError(
                    "Unexpected value of mode. Please choose from 'static', 'nonstatic', or 'rand'."
                )

    model = SentimentAnalysisCNN(
        vocab_size=vocab_size,
        embedding_dim=embedding_dim,
        kernel_sizes=[3, 4, 5],
        num_filters=100,
        num_classes=1,
        d_prob=0.5,
        mode='static')
    model.to(device)
    ## Switch between the two optimizers below as needed.
    # optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-3)

    ## Ranger gives better performance here but starts overfitting more quickly.
    optimizer = Ranger(model.parameters(), weight_decay=0.1)

    criterion = nn.BCELoss()

    def process_function(engine, batch):
        model.train()
        optimizer.zero_grad()
        x, y = batch.text, batch.label
        y_pred = model(x)
        loss = criterion(y_pred, y)
        loss.backward()
        optimizer.step()
        return loss.item()

    def eval_function(engine, batch):
        model.eval()
        with torch.no_grad():
            x, y = batch.text, batch.label
            y_pred = model(x)
            return y_pred, y

    trainer = Engine(process_function)
    train_evaluator = Engine(eval_function)
    validation_evaluator = Engine(eval_function)

    RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')

    def thresholded_output_transform(output):
        y_pred, y = output
        y_pred = torch.round(y_pred)
        return y_pred, y

    Accuracy(output_transform=thresholded_output_transform).attach(
        train_evaluator, 'accuracy')
    Loss(criterion).attach(train_evaluator, 'bce')

    Accuracy(output_transform=thresholded_output_transform).attach(
        validation_evaluator, 'accuracy')
    Loss(criterion).attach(validation_evaluator, 'bce')

    pbar = ProgressBar(persist=True, bar_format="")
    pbar.attach(trainer, ['loss'])

    def score_function(engine):
        val_loss = engine.state.metrics['bce']
        return -val_loss

    handler = EarlyStopping(patience=5,
                            score_function=score_function,
                            trainer=trainer)
    validation_evaluator.add_event_handler(Events.COMPLETED, handler)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_training_results(engine):
        train_evaluator.run(train_iterator)
        metrics = train_evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_bce = metrics['bce']
        pbar.log_message(
            "Training Results - Epoch: {}  Avg accuracy: {:.2f} Avg loss: {:.2f}"
            .format(engine.state.epoch, avg_accuracy, avg_bce))

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        validation_evaluator.run(valid_iterator)
        metrics = validation_evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_bce = metrics['bce']
        pbar.log_message(
            "Validation Results - Epoch: {}  Avg accuracy: {:.2f} Avg loss: {:.2f}"
            .format(engine.state.epoch, avg_accuracy, avg_bce))
        pbar.n = pbar.last_print_n = 0

    checkpointer = ModelCheckpoint('/tmp/models',
                                   'textcnn_ranger_wd_0_1',
                                   save_interval=1,
                                   n_saved=2,
                                   create_dir=True,
                                   save_as_state_dict=True,
                                   require_empty=False)
    trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpointer,
                              {'textcnn_ranger_wd_0_1': model})
    # trainer.add_event_handler(Events.EPOCH_COMPLETED, log_validation_results)

    trainer.run(train_iterator, max_epochs=20)
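One thing this example leaves out: test_iterator is built but never evaluated. Below is a minimal, hedged sketch of a final test pass that reuses eval_function and the same metric attachments; it is not part of the original example, and the handler would have to be registered before trainer.run so that Events.COMPLETED can fire.

    # Hedged sketch (not in the original example): evaluate on the test split when training finishes.
    test_evaluator = Engine(eval_function)
    Accuracy(output_transform=thresholded_output_transform).attach(test_evaluator, 'accuracy')
    Loss(criterion).attach(test_evaluator, 'bce')

    @trainer.on(Events.COMPLETED)
    def log_test_results(engine):
        test_evaluator.run(test_iterator)
        metrics = test_evaluator.state.metrics
        pbar.log_message(
            "Test Results - Avg accuracy: {:.2f} Avg loss: {:.2f}".format(
                metrics['accuracy'], metrics['bce']))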
Example no. 30
0
# Tree
trees = []

start_time = time.time()
fps = 60
frame_time = 1.0/fps

# Ranger
# Button sensitivity when planting
button_time = 30
plant_time = 30
# Speed and movement
unit = 1
ranger_speed = 10
# The ranger object
ranger = Ranger(window_h, window_w, ranger_speed)

# Cutter
cutter_time = 50
tree_cutting_time = 50
cutter_speed = 4
cutter = []

# Difficulty level
cutter_difficulty = 300

# Draw background
screen.fill(bg_color)
# Draw everything on top of that
number_trees = 80
while number_trees != 0:
Example no. 31
0
    def __init__(self,
                 model,
                 path,
                 img_ext,
                 mask_ext,
                 save_path,
                 optim,
                 loss,
                 lr,
                 bs,
                 name,
                 shape=256,
                 crop_type=0):
        self.num_workers = 4
        self.save_path = save_path
        self.batch_size = {"train": bs, "val": 1}
        # NOTE: with batch_size['train'] == bs this always evaluates to 1 (no accumulation)
        self.accumulation_steps = bs // self.batch_size['train']
        self.lr = lr
        self.path = path
        self.img_ext = img_ext
        self.mask_ext = mask_ext
        self.loss = loss
        self.optim = optim
        self.num_epochs = 0
        self.best_val_loss = 1
        self.best_val_dice = 0
        self.best_val_iou = 0
        self.phases = ["train", "val"]
        self.device = torch.device("cuda:0")
        torch.set_default_tensor_type("torch.cuda.FloatTensor")
        self.net = model
        self.name = name
        self.do_cutmix = True
        self.loss_classification = torch.nn.CrossEntropyLoss()
        if self.loss == 'BCE+DICE+IOU':
            self.criterion = BCEDiceJaccardLoss(threshold=None)
        else:
            raise Exception(
                f'{self.loss} is not recognized. Please provide a valid loss function.'
            )

        if self.optim == 'Ranger':
            self.optimizer = Ranger(self.net.parameters(), lr=self.lr)
        elif self.optim == 'LookaheadAdam':
            self.optimizer = LookaheadAdam(self.net.parameters(), lr=self.lr)
        else:
            raise Exception(
                f'{self.optim} is not recognized. Please provide a valid optimizer function.'
            )

        self.scheduler = ReduceLROnPlateau(self.optimizer,
                                           factor=0.5,
                                           mode="min",
                                           patience=4,
                                           verbose=True,
                                           min_lr=1e-5)
        self.net = self.net.to(self.device)
        cudnn.benchmark = True

        self.dataloaders = {
            phase: provider(
                phase=phase,
                path=self.path,
                img_ext=self.img_ext,
                mask_ext=self.mask_ext,
                num_workers=0,
            )
            for phase in self.phases
        }
        self.losses = {phase: [] for phase in self.phases}
        self.dice_scores = {phase: [] for phase in self.phases}
        self.iou_scores = {phase: [] for phase in self.phases}
        self.f2_scores = {phase: [] for phase in self.phases}
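The snippet above only covers __init__; the training loop itself is not shown. As a hedged sketch of how the configured pieces are usually driven, ReduceLROnPlateau (mode="min") is stepped once per epoch with the monitored validation loss. self.iterate below is a hypothetical per-phase method, not part of the original code.

    def fit(self, epochs):
        # Hedged sketch: `self.iterate(phase)` is hypothetical; only the scheduler,
        # the network and the best-loss bookkeeping come from the __init__ above.
        for epoch in range(epochs):
            self.iterate("train")                   # hypothetical training pass
            with torch.no_grad():
                val_loss = self.iterate("val")      # hypothetical validation pass
            self.scheduler.step(val_loss)           # ReduceLROnPlateau watches val loss (mode="min")
            if val_loss < self.best_val_loss:
                self.best_val_loss = val_loss
                torch.save(self.net.state_dict(),
                           f"{self.save_path}/{self.name}_best.pth")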