Example 1
    def train(self, ts, cm, batchsz=1):
        self.model.train()

        start_time = time.time()

        steps = int(math.floor(len(ts) / float(batchsz)))

        shuffle = np.random.permutation(np.arange(steps))
        pg = ProgressBar(steps)
        cm.reset()

        total_loss = 0
        for i in range(steps):
            self.optimizer.zero_grad()
            si = shuffle[i]
            x, y = self._batch(ts, si, batchsz)
            pred = self.model(x)
            loss = self.crit(pred, y)
            total_loss += loss.item()
            loss.backward()
            self._add_to_cm(cm, y, pred)
            self.optimizer.step()
            pg.update()
        pg.done()

        duration = time.time() - start_time
        total_corr = cm.get_correct()
        total = cm.get_total()

        print('Train (Loss %.4f) (Acc %d/%d = %.4f) (%.3f sec)' %
              (float(total_loss) / total, total_corr, total,
               float(total_corr) / total, duration))
        print(cm)
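
Note: ProgressBar in these snippets is a project-local helper, not a standard-library class. For reference, here is a minimal hypothetical sketch matching the count-based usage above (and in Examples 2, 4, 7, 8 and 9): ProgressBar(total), update() once per step, done() at the end. The constructor argument and method names come from the snippets; the console rendering is an assumption.

import sys

class ProgressBar(object):
    """Minimal console progress bar (hypothetical sketch).

    Mirrors the usage in these snippets: ProgressBar(total),
    update() once per step, done() when finished.
    """

    def __init__(self, total, width=40):
        self.total = int(total)
        self.width = width
        self.count = 0

    def update(self):
        # Advance one step and redraw the bar in place.
        self.count += 1
        filled = int(self.width * self.count / max(self.total, 1))
        bar = '#' * filled + '-' * (self.width - filled)
        sys.stdout.write('\r[%s] %d/%d' % (bar, self.count, self.total))
        sys.stdout.flush()

    def done(self):
        # End the bar line so later prints start on a fresh line.
        sys.stdout.write('\n')
        sys.stdout.flush()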
Example 2
    def train(self, ts):
        self.model.train()

        start_time = time.time()

        steps = int(len(ts))
        shuffle = np.random.permutation(np.arange(steps))

        total_loss = total_corr = total = 0
        pg = ProgressBar(steps)
        for i in range(steps):
            self.optimizer.zero_grad()
            si = shuffle[i]
            src, dst, tgt = self._wrap(ts[si])
            pred = self.model((src, dst))
            loss = self.crit(pred, tgt)
            total_loss += loss.item()
            loss.backward()

            total_corr += self._right(pred, tgt)
            total += self._total(tgt)
            self.optimizer.step()
            pg.update()
        pg.done()
        duration = time.time() - start_time

        avg_loss = float(total_loss) / total

        print(
            'Train (Loss %.4f) (Perplexity %.4f) (Acc %d/%d = %.4f) (%.3f sec)'
            % (avg_loss, np.exp(avg_loss), total_corr, total,
               float(total_corr) / total, duration))
Example 3
    def make_pdb(self, bar_msg=''):
        """
        Returns a PDB-like formatted string. bar_msg is the message to show at
        ProgressBar initialization; bar_msg = '' disables the bar.
        :param bar_msg: str
        :return: str
        """
        models = self.models()
        if bar_msg:
            bar = ProgressBar(len(models), bar_msg)
        else:
            bar = None
        if len(models) == 1:
            s = self.__repr__()
        else:
            s = ''
            for m in models:
                s += 'MODEL%9i\n' % m[0].model
                s += m.__repr__()
                s += '\nENDMDL\n'
                if bar:
                    bar.update()
        if bar:
            bar.done(False)
        return s
Example 4
    def train(self, ts, batchsz):
        self.model.train()

        start_time = time.time()

        steps = int(math.floor(len(ts) / float(batchsz)))
        shuffle = np.random.permutation(np.arange(steps))
        total_loss = total = 0
        pg = ProgressBar(steps)
        for i in range(steps):
            self.optimizer.zero_grad()

            si = shuffle[i]
            ts_i = data.batch(ts, si, batchsz, long_tensor_alloc, tensor_shape,
                              tensor_max)
            src, dst, tgt = self._wrap(ts_i)
            pred = self.model((src, dst))
            loss = self.crit(pred, tgt)
            total_loss += loss.item()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)

            total += self._total(tgt)
            self.optimizer.step()
            pg.update()
        pg.done()
        duration = time.time() - start_time

        avg_loss = float(total_loss) / total

        print('Train (Loss %.4f) (Perplexity %.4f) (%.3f sec)' %
              (avg_loss, np.exp(avg_loss), duration))
Example 5
def evaluate(epoch, model, val_loader, criterion, log_path):
    model.eval()
    val_progressor = ProgressBar(log_path,
                                 mode="Val  ",
                                 epoch=epoch,
                                 total_epoch=config.epochs,
                                 model_name=config.model_name,
                                 total=len(val_loader))
    losses = AverageMeter()
    top1 = AverageMeter()

    with torch.no_grad():
        for index, (data, label) in enumerate(val_loader):
            val_progressor.current = index
            data = data.cuda()
            label = torch.from_numpy(np.asarray(label)).cuda()
            output = model(data)
            loss = criterion(output, label)

            p_top1, p_top2 = accuracy(output, label, topk=(1, 2))
            losses.update(loss.item(), data.size(0))
            top1.update(p_top1[0], data.size(0))
            val_progressor.current_loss = losses.avg
            val_progressor.current_top1 = top1.avg
            val_progressor()
            #print('epoch %d validate iteration %d: loss: %.3f' % (epoch + 1, index + 1, it_loss.data))
            #correct += (output == label).sum()
        val_progressor.done()
    return losses.avg, top1.avg
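
Examples 5 and 10 use a different, stateful ProgressBar: it is constructed with a log path and epoch metadata, fed through the current/current_loss/current_top1 attributes, and redrawn by calling the instance itself. A hypothetical sketch of that interface follows; the display format and the log-file behavior are assumptions.

import sys

class ProgressBar(object):
    """Sketch of the stateful, callable variant used in Examples 5 and 10.

    Assumptions: calling the instance redraws one status line, and
    done() ends the line and appends a summary to <log_path>log.txt
    (log_path already ends with os.sep in Example 10).
    """

    def __init__(self, log_path, mode='Train', epoch=0, total_epoch=1,
                 model_name='', total=1):
        self.log_path = log_path
        self.mode = mode
        self.epoch = epoch
        self.total_epoch = total_epoch
        self.model_name = model_name
        self.total = total
        # Fields the training/validation loop updates before each call.
        self.current = 0
        self.current_loss = 0.0
        self.current_top1 = 0.0

    def __call__(self):
        # Redraw a single in-place status line.
        sys.stdout.write(
            '\r%s %s  epoch %d/%d  batch %d/%d  loss %.4f  top1 %.4f' %
            (self.mode, self.model_name, self.epoch + 1, self.total_epoch,
             self.current + 1, self.total, self.current_loss,
             self.current_top1))
        sys.stdout.flush()

    def done(self):
        sys.stdout.write('\n')
        with open(self.log_path + 'log.txt', 'a') as f:
            f.write('%s epoch %d/%d: loss %.4f top1 %.4f\n' %
                    (self.mode, self.epoch + 1, self.total_epoch,
                     self.current_loss, self.current_top1))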
Example 6
    def rmsd_matrix(self, msg=''):
        """
        Calculates the RMSD matrix (no fitting) for all pairs of models in the trajectory.
        :param msg: str, message shown at ProgressBar initialization; '' disables the bar
        :return: np.array
        """
        def rmsd(m1, m2, ml):
            return np.sqrt(np.sum((m1 - m2)**2) / ml)

        model_length = len(self.template)
        models = self.coordinates.reshape(-1, model_length, 3)
        dim = len(models)
        result = np.zeros((dim, dim))
        if msg:
            bar = ProgressBar((dim * dim - dim) // 2, msg=msg)
        else:
            bar = None
        for i in range(dim):
            for j in range(i + 1, dim):
                if bar:
                    bar.update()
                result[i, j] = result[j, i] = rmsd(models[i], models[j],
                                                   model_length)
        if bar:
            bar.done(True)
        return result
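
Examples 3 and 6 pass an extra message to the constructor and a boolean to done(). A hedged extension of the earlier sketch, assuming the boolean controls whether a closing summary line is printed (the snippets do not show this):

import sys

class ProgressBar(object):
    """Sketch of the message-aware variant used in Examples 3 and 6.

    Assumption: the boolean handed to done() decides whether a
    closing summary line is emitted.
    """

    def __init__(self, total, msg=''):
        self.total = int(total)
        self.count = 0
        if msg:
            sys.stdout.write('%s\n' % msg)

    def update(self):
        self.count += 1
        sys.stdout.write('\r%d/%d' % (self.count, self.total))
        sys.stdout.flush()

    def done(self, print_summary=True):
        sys.stdout.write('\n')
        if print_summary:
            sys.stdout.write('done (%d steps)\n' % self.count)
        sys.stdout.flush()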
Example 7
    def train(self, ts, cm, dropout, batchsz=1):

        total_loss = 0
        start_time = time.time()
        steps = int(math.floor(len(ts)/float(batchsz)))
        shuffle = np.random.permutation(np.arange(steps))
        pg = ProgressBar(steps)
        cm.reset()

        for i in range(steps):
            si = shuffle[i]
            ts_i = data.batch(ts, si, batchsz)
            feed_dict = self.model.ex2dict(ts_i, 1.0-dropout)
        
            _, step, summary_str, lossv, guess = self.sess.run(
                [self.train_op, self.global_step, self.summary_op,
                 self.loss, self.model.best],
                feed_dict=feed_dict)
            self.train_writer.add_summary(summary_str, step)
            total_loss += lossv
            cm.add_batch(ts_i.y, guess)
            pg.update()

        pg.done()
        total = cm.get_total()
        total_corr = cm.get_correct()
        duration = time.time() - start_time

        print('Train (Loss %.4f) (Acc %d/%d = %.4f) (%.3f sec)' %
              (float(total_loss) / total, total_corr, total,
               float(total_corr) / total, duration))
        print(cm)
Example 8
def main(argv=None):
    with tf.Session() as sess:
        data_dir = FLAGS.data_dir
        files = [os.path.join(data_dir, item) for item in os.listdir(data_dir)]
        # files = random.sample(files,  800)
        images = tf.placeholder(tf.float32,
                                [None, RESIZE_FINAL, RESIZE_FINAL, 3])
        logits = inference(
            images,
            False,
            num_classes=2,
            num_blocks=[3, 4, 6, 3],  # defaults to 50-layer network
            use_bias=False,  # defaults to using batch norm
            bottleneck=True)
        init = tf.global_variables_initializer()
        resnet_variables = tf.global_variables()
        saver = tf.train.Saver(resnet_variables)
        saver.restore(sess, os.path.join(FLAGS.model_dir, FLAGS.ckpt_file))

        softmax_output = tf.nn.softmax(logits)
        writer = None  # stays None when no FLAGS.target is given
        if FLAGS.target:
            print('Creating output file %s' % FLAGS.target)
            output = open(os.path.join(FLAGS.data_dir, FLAGS.target), 'w')
            writer = csv.writer(output)
            writer.writerow(('file', 'label', 'score'))

        num_batches = int(math.ceil(len(files) / float(MAX_BATCH_SZ)))
        pg = ProgressBar(num_batches)
        for j in range(num_batches):
            start_offset = j * MAX_BATCH_SZ
            end_offset = min((j + 1) * MAX_BATCH_SZ, len(files))

            batch_image_files = files[start_offset:end_offset]
            images_ = []
            for file in batch_image_files:
                print(file)
                image_buffer = tf.read_file(file)
                bbox = []
                image = image_preprocessing(image_buffer, [], False)
                images_.append(image)
            image_batch = tf.stack(images_)
            batch_results = sess.run(softmax_output,
                                     feed_dict={images: image_batch.eval()})
            batch_sz = batch_results.shape[0]

            for i in range(batch_sz):
                output_i = batch_results[i]
                best_i = np.argmax(output_i)

                best_choice = (label_list[best_i], output_i[best_i])
                if writer is not None:
                    f = batch_image_files[i]
                    writer.writerow(
                        (f, best_choice[0], '%.2f' % best_choice[1]))
            pg.update()
        pg.done()
Example 9
    def train(self, ts, sess, summary_writer, dropout, batchsz):
        total_loss = 0
        steps = int(math.floor(len(ts)/float(batchsz)))
        shuffle = np.random.permutation(np.arange(steps))
        start_time = time.time()
    
        pg = ProgressBar(steps)

        for i in range(steps):
            si = shuffle[i]
            ts_i = batch(ts, si, batchsz)
            feed_dict = self.model.ex2dict(ts_i, 1.0-dropout)
        
            _, step, summary_str, lossv = sess.run(
                [self.train_op, self.global_step, self.summary_op, self.loss],
                feed_dict=feed_dict)
            summary_writer.add_summary(summary_str, step)
            #print(lossv, errv, totv)
            total_loss += lossv
            pg.update()

        pg.done()
        duration = time.time() - start_time
            
        print('Train (Loss %.4f) (%.3f sec)' % (total_loss/steps, duration))
Example 10
def main():
    weight_path = config.weights + config.model_name + os.sep + config.description + os.sep + str(
        config.fold) + os.sep
    if not os.path.exists(weight_path):
        os.makedirs(weight_path)
    log_path = config.logs + config.model_name + os.sep + config.description + os.sep + str(
        config.fold) + os.sep
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    submit_path = config.submit + config.model_name + os.sep + config.description + os.sep + str(
        config.fold) + os.sep
    if not os.path.exists(submit_path):
        os.makedirs(submit_path)

    config.write_to_log(log_path + os.sep + 'log.txt')

    # dataset preparation
    train_dataset = customDataset(config.train_data, train=True)
    val_dataset = customDataset(config.test_data, train=True)
    train_loader = DataLoader(train_dataset,
                              batch_size=config.batch_size,
                              shuffle=True,
                              pin_memory=True)
    val_loader = DataLoader(val_dataset,
                            batch_size=config.batch_size * 2,
                            shuffle=False,
                            pin_memory=False)
    # model preparation
    model = get_net(config.num_classes)
    model = DataParallel(model.cuda(), device_ids=config.gpus)
    model.train()
    # optimizer preparation
    optimizer = optim.Adam(model.parameters(),
                           lr=config.lr,
                           amsgrad=True,
                           weight_decay=config.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
    # loss preparation
    #criterion = nn.CrossEntropyLoss().cuda()
    criterion = FocalLoss(config.num_classes).cuda()

    train_loss = AverageMeter()
    train_top1 = AverageMeter()
    valid_loss = [np.inf, 0, 0]
    best_precision = 0

    for epoch in range(config.epochs):
        model.train()  # evaluate() leaves the model in eval mode each epoch
        scheduler.step(epoch)
        train_progressor = ProgressBar(log_path,
                                       mode="Train",
                                       epoch=epoch,
                                       total_epoch=config.epochs,
                                       model_name=config.model_name,
                                       total=len(train_loader))
        for index, (data, label) in enumerate(train_loader):
            train_progressor.current = index
            data = data.cuda()
            label = torch.from_numpy(np.asarray(label)).cuda()

            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, label)
            loss.backward()
            optimizer.step()

            precision1_train, precision2_train = accuracy(output,
                                                          label,
                                                          topk=(1, 2))
            train_loss.update(loss.item(), data.size(0))
            train_top1.update(precision1_train[0], data.size(0))
            train_progressor.current_loss = train_loss.avg
            train_progressor.current_top1 = train_top1.avg
            train_progressor()
            #print('train epoch %d iteration %d: loss: %.3f' % (epoch + 1, index + 1, loss.data))
        train_progressor.done()
        val_loss, val_top1 = evaluate(epoch, model, val_loader, criterion,
                                      log_path)
        is_best = val_top1 > best_precision
        #print(bool(is_best))
        best_precision = max(val_top1, best_precision)
        save_checkpoint(
            {
                "epoch": epoch + 1,
                "model_name": config.model_name,
                "state_dict": model.state_dict(),
                "best_precision1": best_precision,
                "optimizer": optimizer.state_dict(),
                "fold": config.fold,
                "valid_loss": valid_loss,
            }, is_best, weight_path, log_path, epoch)