def val_loop(model, loader, cuda):
    model.eval()
    preds = []
    true_labels = []
    with tqdm(total=len(loader.batch_sampler)) as pbar:
        for i, batch in enumerate(loader):
            if cuda:
                batch.cuda()
            mask = generate_mask(batch)
            logits = model(input_ids=batch.input,
                           attention_mask=mask,
                           token_type_ids=batch.token_type_ids)
            logits = logits[0]  # the model returns a tuple; take the logits
            preds.append(logits.argmax(-1).squeeze().cpu())
            true_labels.append(batch.labels.cpu())
            pbar.update(1)
    preds = torch.cat(preds)
    y_true = torch.cat(true_labels)
    model.train()
    metric_params = {
        'average': 'weighted',
        'labels': list(range(model.config.num_labels))
    }
    return metrics(preds, y_true, metric_params)
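Note: generate_mask and metrics here are project helpers not shown in the snippet. A minimal sketch of the mask helper, assuming batch.input is a padded LongTensor and that id 0 is the padding token (both assumptions, not confirmed by the source):

def generate_mask(batch, pad_token_id=0):
    # hypothetical reconstruction: 1 for real tokens, 0 for padding positions
    return (batch.input != pad_token_id).long()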
Example #2
def main():
    #read image
    img = './imgs/lena.bmp'
    img = cv2.imread(img,0).astype(np.float32)
    print('img shape: ', img.shape)
    img = torch.from_numpy(img)
    img = torch.unsqueeze(img,0)
    img = torch.unsqueeze(img,0)
    img = img / 255.0
    #read net
    #net = UNet()
    pad = 'reflection'
    net = skip(1, 1,
               num_channels_down=[128] * 5,
               num_channels_up=[128] * 5,
               num_channels_skip=[128] * 5,
               filter_size_up=3, filter_size_down=3,
               upsample_mode='nearest', filter_skip_size=1,
               need_sigmoid=True, need_bias=True, pad=pad, act_fun='LeakyReLU')
    #get optimizer
    lr = 0.0001
    optimizer = torch.optim.Adam(net.parameters(), lr = lr)
    #train parameter
    iter_num = 10000
    mask_size = (1,1,512,512)
    is_even = True
    mask = utils.generate_mask(mask_size, is_even)
    input_noise = torch.randn(*mask_size)
    is_gpu = True
    # train
    train(net, input_noise, img, mask, optimizer, iter_num, is_gpu)
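utils.generate_mask is not shown. Given mask_size = (1, 1, 512, 512) and the is_even flag, one plausible reading is a checkerboard inpainting mask that keeps pixels of one parity; a sketch under that assumption (the project's real implementation may differ):

import torch

def generate_mask(size, is_even=True):
    # hypothetical: binary checkerboard mask over the last two dimensions
    rows = torch.arange(size[-2]).view(-1, 1)
    cols = torch.arange(size[-1]).view(1, -1)
    keep = ((rows + cols) % 2 == (0 if is_even else 1))
    return keep.float().expand(*size).clone()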
Example #3
def train_epoch(loader, model, optimizer, lr_scheduler, config, cuda):
    loss_fn = torch.nn.CrossEntropyLoss()
    with tqdm(total=len(loader.batch_sampler)) as pbar:
        epoch_loss = 0.
        for i, batch in enumerate(loader):
            if cuda:
                batch.cuda()
            optimizer.zero_grad()
            mask = generate_mask(batch)
            logits = model(input_ids=batch.input,
                           attention_mask=mask,
                           token_type_ids=batch.token_type_ids)
            logits = logits[0]  # the model returns a tuple; take the logits
            loss = loss_fn(logits.view(-1, config.num_labels),
                           batch.labels.view(-1))
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 10.)
            optimizer.step()
            lr_scheduler.step()

            if batch.labels.size(0) > 1:
                acc = accuracy_score(
                    batch.labels.cpu(),
                    logits.cpu().detach().argmax(-1).squeeze())
            else:
                acc = 0.
            epoch_loss += loss.item()
            # if i % config.log_interval == 0:
            wandb.log({
                "Train Accuracy": acc,
                "Train Loss": loss.item(),
                "Gradient Norm": grad_norm(model).item(),
                "Learning Rate": optimizer.param_groups[0]['lr']
            })
            pbar.set_description(
                f'global_step: {lr_scheduler.last_epoch}| loss: {loss.item():.4f}| acc: {acc*100:.1f}%| epoch_av_loss: {epoch_loss/(i+1):.4f} |'
            )
            pbar.update(1)
            if lr_scheduler.last_epoch > config.total_steps:
                break
        # move the final batch and logits off the GPU before returning
        batch.cpu()
        logits = logits.cpu().detach().argmax(-1).squeeze()
        return epoch_loss / (i + 1)
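The wandb call above also references a grad_norm helper that is not part of the snippet; a plausible minimal version, matching how it is used with .item() (an assumption, not the project's confirmed code):

def grad_norm(model):
    # total L2 norm over all parameter gradients (hypothetical helper)
    norms = [p.grad.detach().norm(2) for p in model.parameters()
             if p.grad is not None]
    return torch.norm(torch.stack(norms), 2)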
Example #5
# 1. Calibrate camera.
print('Calibrating camera')
cam_mtx, roi, mapx, mapy = calibrate_camera(dset_calib, [8, 6], square_size)

# 2. Generate one gray image by averaging gray images.
print('Generating averaged gray and reference images, and generating mask')
gray = generate_average_image(dset_ftp_g)

# 3. Generate one reference image by averaging references.
ref  = generate_average_image(dset_ftp_r)

# 4. Undistort gray image.
# gray = undistort_image(gray, mapx, mapy)

# 5. From gray image, determine mask and disk and rectangle properties
mask, c_disk, R_disk, c_rect, sl_rect, mask_of_disk_alone, mask_of_rect_alone = generate_mask(gray)

N_vertical_slices = int(np.ceil(N_images / 100))
height_fields_dset = hdf5_output_file.create_dataset(
    'height_fields/disk',
    shape=(int(2*R_disk), int(2*R_disk), N_images),
    chunks=(64, 64, N_vertical_slices), dtype='float64')

# 6. Undistort reference image.
# ref  = undistort_image(ref, mapx, mapy)

resfactor = np.mean(ref*mask)/np.mean(gray*mask)  # intensity ratio between reference and gray inside the mask

# 7. Generate (reference - gray) image.
ref_m_gray = ref - resfactor*gray

# 8. Extrapolate reference image
ref_m_gray  = gerchberg2d(ref_m_gray, mask, N_iter_max=N_iter_max)
Example #6
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True

print(args.dataset, args.type, args.rate)
print("num of components:", args.ncomp)
print("nhid:", args.nhid)
print("epochs:", args.epoch)

# generate all masks for the experiment
tmpdata = LinkPredData(args.dataset)
masks = [
    generate_mask(tmpdata.features, args.rate, args.type) for _ in range(5)
]
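generate_mask(features, rate, type) presumably marks feature entries as missing at the given rate; a minimal sketch for a 'uniform' missingness type (the semantics of the other args.type values are not shown in the snippet):

import torch

def generate_mask(features, rate, mask_type='uniform'):
    # hypothetical: boolean mask, True = treat this feature entry as missing
    assert mask_type == 'uniform', 'only uniform missingness sketched here'
    return torch.rand_like(features) < rate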


def objective(trial):
    # Tune hyperparameters (dropout, weight decay, learning rate) using Optuna
    dropout = trial.suggest_uniform('dropout', 0., 0.1)
    lr = trial.suggest_loguniform('lr', 5e-4, 2e-2)
    weight_decay = trial.suggest_loguniform('weight_decay', 1e-10, 1e-3)

    # prepare data and model
    data = LinkPredData(args.dataset, seed=args.seed)
    apply_mask(data.features, masks[0])
    model = VGAEmf(data, args.nhid, args.latent_dim, dropout, args.ncomp)

    # run model
Example #7
parser.add_argument('--nhid', default=16, type=int, help='the number of hidden units')
parser.add_argument('--dropout', default=0.5, type=float, help='dropout rate')
# parser.add_argument('--ncomp', default=5, type=int, help='the number of Gaussian components')
parser.add_argument('--lr', default=0.005, type=float, help='learning rate')
parser.add_argument('--wd', default=1e-2, type=float, help='weight decay')
parser.add_argument('--epoch', default=10000, type=int, help='the number of training epoch')
parser.add_argument('--patience', default=100, type=int, help='patience for early stopping')
parser.add_argument('--verbose', action='store_true', help='verbose')

parser.add_argument('--emb1', default=100, type=int, help='k : the size of linear combination')
parser.add_argument('--emb2', default=100, type=int, help='m : the size of rank regularization')
parser.add_argument('--emb3_1', default=100, type=int, help='la : the size of set embedding')
parser.add_argument('--emb3_2', default=100, type=int, help='lb : the size of set embedding')

args = parser.parse_args()

if __name__ == '__main__':
    data = NodeClsData(args.dataset)
    mask = generate_mask(data.features, args.rate, args.type)
    apply_mask(data.features, mask)
    model = GCNfse(data, nhid=args.nhid, dropout=args.dropout,
                   n_emb1=args.emb1, n_emb2=args.emb2,
                   n_emb3_1=args.emb3_1, n_emb3_2=args.emb3_2)
    params = {
        'lr': args.lr,
        'weight_decay': args.wd,
        'epochs': args.epoch,
        'patience': args.patience,
        'early_stopping': True
    }
    trainer = NodeClsTrainer(data, model, params, niter=20, verbose=args.verbose)
    trainer.run()
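apply_mask is likewise external to the snippet; given the usage above, a minimal sketch that blanks out the masked entries in place (hypothetical, paired with the generate_mask sketch from Example #6):

def apply_mask(features, mask):
    # hypothetical: zero out feature entries marked missing, in place
    features[mask] = 0.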
Example #8
    def preProcessData(self):

        self.toi_box_batch, self.label_batch, _ = select_toi(self.toi_batch)

        self.toi_box_entity, _, self.space_list_entity = select_toi(
            self.entity_batch)
        self.word_batch_var = self.whetherUseGpu(
            Variable(torch.LongTensor(np.array(self.word_batch))),
            self.config.if_gpu)
        self.mask_batch_var = self.whetherUseGpu(
            generate_mask(self.word_batch_var.shape), self.config.if_gpu)
        self.char_batch_var = self.whetherUseGpu(
            Variable(torch.LongTensor(np.array(self.char_batch))),
            self.config.if_gpu)
        self.pos_tag_batch_var = self.whetherUseGpu(
            Variable(torch.LongTensor(np.array(self.pos_tag_batch))),
            self.config.if_gpu)
        self.gold_label_vec = self.whetherUseGpu(
            Variable(torch.LongTensor(np.hstack(self.label_batch))),
            self.config.if_gpu)
        self.entity_nested_depth = []
        for each_entity_list in self.entity_batch:
            gt_layer = [-1] * len(each_entity_list)
            for idx in range(len(gt_layer)):
                if gt_layer[idx] == -1:
                    dfs(idx, each_entity_list, gt_layer)
            self.entity_nested_depth.append(gt_layer)
        self.entity_nested_depth = np.hstack(self.entity_nested_depth)
        self.entity_nested_depth[np.where(
            self.entity_nested_depth >= self.config.nested_depth
        )] = self.config.nested_depth - 1
        self.entity_nested_depth = self.whetherUseGpu(
            Variable(torch.LongTensor(self.entity_nested_depth)),
            self.config.if_gpu)

        if self.config.use_bert:
            # BERT initialization
            tokens_tensors = []
            for each in self.word_origin_batch:
                text = "[CLS] " + " ".join(each) + " [SEP]"

                if text not in self.text_to_bert:
                    tokens_length = []
                    text_subwords = self.tokenizer.tokenize(text)
                    st = 0
                    for each_word in each:
                        aim = self.tokenizer.tokenize(each_word)
                        tokens_length.append([st, st + len(aim)])
                        st = st + len(aim)
                    tokens_length = np.array(tokens_length)
                    sub_tokens = torch.tensor([
                        self.tokenizer.convert_tokens_to_ids(text_subwords)
                    ]).cuda()
                    text_embedding, _ = self.bertModel(sub_tokens)
                    text_embedding = torch.cat(text_embedding,
                                               dim=0).squeeze(1)
                    word_embedding = self.bertModel.embeddings.word_embeddings(
                        sub_tokens)
                    word_embedding = self.bertModel.embeddings.LayerNorm(
                        word_embedding)
                    text_embedding = torch.cat(
                        (text_embedding, word_embedding), dim=0)[:, 1:-1, :]
                    cumsum = torch.cat([
                        torch.zeros(text_embedding.size(0), 1,
                                    text_embedding.size(2)).cuda(),
                        torch.cumsum(text_embedding, 1)
                    ],
                                       dim=1)
                    boundary_len = Variable(
                        torch.FloatTensor(tokens_length[:, 1] -
                                          tokens_length[:, 0]),
                        requires_grad=False).cuda()
                    hidden_list = (
                        cumsum[:, tokens_length[:, 1], :] -
                        cumsum[:, tokens_length[:, 0], :]) / boundary_len.view(
                            1, boundary_len.size(0), 1)
                    self.text_to_bert_out[self.cnt] = hidden_list.cpu().numpy()
                    self.text_to_bert[text] = self.cnt
                    self.cnt += 1
                else:
                    hidden_list = torch.tensor(self.text_to_bert_out[
                        self.text_to_bert.get(text)]).cuda()
                tokens_tensors.append(hidden_list.unsqueeze(0))

            hidden_list = torch.cat(tokens_tensors, dim=0)

            if not self.config.fusion:
                # keep only a single layer slice when fusion is disabled
                hidden_list = hidden_list[:, -2:-1, :, :]
            self.hiddenList = hidden_list
        else:
            self.hiddenList = None
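The cumsum construction in the BERT branch is the standard prefix-sum trick for mean-pooling subword vectors back to word level: with a zero row prepended, the mean over subword span [s, e) is (cumsum[e] - cumsum[s]) / (e - s). A standalone illustration (shapes and names here are illustrative only):

import torch

subword_emb = torch.randn(7, 768)                 # 7 subwords, hidden size 768
spans = torch.tensor([[0, 2], [2, 3], [3, 7]])    # [start, end) subword span per word
csum = torch.cat([torch.zeros(1, 768), torch.cumsum(subword_emb, dim=0)])
lengths = (spans[:, 1] - spans[:, 0]).float().unsqueeze(1)
word_emb = (csum[spans[:, 1]] - csum[spans[:, 0]]) / lengths
assert torch.allclose(word_emb[0], subword_emb[0:2].mean(0), atol=1e-5)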
Example #9
def train_model(training_images,
                training_flows,
                training_flows_ang,
                max_epoch,
                dataset_name='',
                start_model_idx=0,
                batch_size=16,
                channel=1):
    print('no. of images = %s' % len(training_images))

    assert len(training_images) == len(training_flows)
    h, w = training_images.shape[1:3]
    assert h < w

    # rescale inputs from [0, 1] to [-1, 1]
    training_images /= 0.5
    training_images -= 1.

    training_flows /= 0.5
    training_flows -= 1.

    training_flows_ang /= 0.5
    training_flows_ang -= 1.

    plh_frame_true = tf.placeholder(tf.float32, shape=[None, h, w, channel])
    plh_flow_true = tf.placeholder(tf.float32, shape=[None, h, w, channel])
    plh_flow_ang_true = tf.placeholder(tf.float32, shape=[None, h, w, channel])
    plh_is_training = tf.placeholder(tf.bool)

    # generator
    plh_dropout_prob = tf.placeholder_with_default(1.0, shape=())
    output_appe, h1_f_appe, h1_b_appe, encoder_vec = Generator_Appearance(
        plh_frame_true, plh_is_training, plh_dropout_prob, True)
    output_opt, output_opt_ang = Generator_Flow(output_appe, plh_is_training,
                                                plh_dropout_prob)

    # discriminator for true flow and fake flow
    D_real_flow, D_real_logits_flow = Discriminator_flow(plh_flow_true,
                                                         plh_is_training,
                                                         reuse=False)
    D_fake_flow, D_fake_logits_flow = Discriminator_flow(output_opt,
                                                         plh_is_training,
                                                         reuse=True)

    # appearance loss
    dy1, dx1 = tf.image.image_gradients(output_appe)
    dy0, dx0 = tf.image.image_gradients(plh_frame_true)
    loss_inten = tf.reduce_mean((output_appe - plh_frame_true)**2)
    loss_gradi = tf.reduce_mean(
        tf.abs(tf.abs(dy1) - tf.abs(dy0)) + tf.abs(tf.abs(dx1) - tf.abs(dx0)))
    loss_appe = loss_inten + loss_gradi
    # feature map loss
    loss_perceptual = tf.reduce_mean((h1_f_appe - h1_b_appe)**2)

    # optical loss
    loss_opt_mag = tf.reduce_mean(tf.abs(output_opt - plh_flow_true))
    loss_opt_ang = tf.reduce_mean(tf.abs(output_opt_ang - plh_flow_ang_true))
    loss_opt = loss_opt_mag + loss_opt_ang

    # GAN loss
    D_loss = 0.5*tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real_logits_flow, labels=tf.ones_like(D_real_flow))) + \
             0.5*tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits_flow, labels=tf.zeros_like(D_fake_flow)))
    G_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=D_fake_logits_flow, labels=tf.ones_like(D_fake_flow)))
    # G_loss = tf.reduce_mean((D_fake_logits_flow - D_real_logits_flow) ** 2)
    G_loss_total = 0.25 * G_loss + loss_appe + 2 * loss_opt + 0.2 * loss_perceptual

    # optimizers
    t_vars = tf.trainable_variables()
    g_vars = [var for var in t_vars if 'gen_' in var.name]
    d_vars = [var for var in t_vars if 'dis_' in var.name]

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        D_optimizer = tf.train.AdamOptimizer(learning_rate=0.00002,
                                             beta1=0.9,
                                             beta2=0.999,
                                             name='AdamD').minimize(
                                                 D_loss, var_list=d_vars)
        G_optimizer = tf.train.AdamOptimizer(learning_rate=0.0002,
                                             beta1=0.9,
                                             beta2=0.999,
                                             name='AdamG').minimize(
                                                 G_loss_total, var_list=g_vars)

    init_op = tf.global_variables_initializer()

    # tensorboard
    tf.summary.scalar('D_loss', D_loss)
    tf.summary.scalar('G_loss', G_loss)
    tf.summary.scalar('appe_loss', loss_appe)
    tf.summary.scalar('opt_loss', loss_opt)
    merge = tf.summary.merge_all()

    # checkpointing and session configuration
    saver = tf.train.Saver(max_to_keep=50)
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.9  # allocate at most 90% of GPU memory
    config.gpu_options.allow_growth = True  # allocate dynamically
    with tf.Session(config=config) as sess:
        sess.run(init_op)
        loss_anomaly = []
        loss_normal = []
        loss_recoder = []
        if start_model_idx > 0:
            saver.restore(
                sess, './training_saver/%s/model_ckpt_%d.ckpt' %
                (dataset_name, start_model_idx))
            loss_anomaly = scio.loadmat(
                './training_saver/%s/loss_anomaly_%d.mat' %
                (dataset_name, start_model_idx))['loss'][0]
            loss_normal = scio.loadmat(
                './training_saver/%s/loss_normal_%d.mat' %
                (dataset_name, start_model_idx))['loss'][0]
            loss_recoder = scio.loadmat(
                './training_saver/%s/loss_recoder_%d.mat' %
                (dataset_name, start_model_idx))['loss'][0]

        # define log path for tensorboard
        tensorboard_path = './training_saver/%s/logs/2/train' % (dataset_name)
        if not os.path.exists(tensorboard_path):
            pathlib.Path(tensorboard_path).mkdir(parents=True, exist_ok=True)

        train_writer = tf.summary.FileWriter(tensorboard_path, sess.graph)
        print('Run: tensorboard --logdir logs/2')
        # run the training stage
        is_mask = True
        for i in range(start_model_idx, max_epoch):
            if is_mask:
                mask = generate_mask(h, w)

            tf.set_random_seed(i)
            np.random.seed(i)

            batch_idx = np.array_split(
                np.random.permutation(len(training_images)),
                np.ceil(len(training_images) / batch_size))
            for j in range(len(batch_idx)):
                # discriminator
                _, curr_D_loss, summary = sess.run(
                    [D_optimizer, D_loss, merge],
                    feed_dict={
                        plh_frame_true: training_images[batch_idx[j]],
                        plh_flow_true: training_flows[batch_idx[j]],
                        plh_flow_ang_true: training_flows_ang[batch_idx[j]],
                        plh_is_training: True
                    })

                # generator
                if j % len(batch_idx) == 0:
                    _, curr_G_loss, curr_loss_appe, curr_loss_opt, curr_loss_opt_ang, curr_gen_frames, curr_gen_flows, summary = \
                        sess.run([G_optimizer, G_loss, loss_appe, loss_opt_mag, loss_opt_ang, output_appe[:4],
                                  output_opt[:4], merge],
                                 feed_dict={plh_frame_true: training_images[batch_idx[j]],
                                            plh_flow_true: training_flows[batch_idx[j]],
                                            plh_flow_ang_true: training_flows_ang[batch_idx[j]],
                                            plh_dropout_prob: p_keep,
                                            plh_is_training: True,
                                            })
                    sample_images(dataset_name,
                                  training_flows[batch_idx[j][:4]],
                                  training_images[batch_idx[j][:4]],
                                  curr_gen_flows,
                                  curr_gen_frames,
                                  i,
                                  j,
                                  train=True)

                else:
                    _, curr_G_loss, curr_loss_appe, curr_loss_opt, curr_loss_opt_ang, summary = \
                        sess.run([G_optimizer, G_loss, loss_appe, loss_opt_mag, loss_opt_ang, merge],
                                 feed_dict={plh_frame_true: training_images[batch_idx[j]],
                                            plh_flow_true: training_flows[batch_idx[j]],
                                            plh_flow_ang_true: training_flows_ang[batch_idx[j]],
                                            plh_dropout_prob: p_keep,
                                            plh_is_training: True,
                                            })

                    # normal
                    curr_loss_appe_n = sess.run(
                        loss_appe,
                        feed_dict={
                            plh_frame_true: training_images[batch_idx[j]],
                            plh_dropout_prob: 1.0,
                            plh_is_training: False
                        })

                    if is_mask:
                        training_images_a = training_images[
                            batch_idx[j]].transpose(0, 3, 1, 2) * mask
                        training_images_a = training_images_a.transpose(
                            0, 2, 3, 1)

                    # pseudo-anomaly
                    curr_loss_appe_a, re_pseudo = sess.run(
                        [loss_appe, output_appe],
                        feed_dict={
                            plh_frame_true: training_images_a,
                            plh_dropout_prob: 1.0,
                            plh_is_training: False
                        })

                    loss_anomaly = np.append(loss_anomaly,
                                             curr_loss_appe_a)  # appearance loss on pseudo-anomalies
                    loss_normal = np.append(loss_normal,
                                            curr_loss_appe_n)  # appearance loss on normal frames
                    loss_recoder = np.append(loss_recoder,
                                             curr_loss_appe)  # training reconstruction loss

                    if j % 50 == 0:
                        print(
                            'Epoch: %d/%d, Batch: %3d/%d,  D = %.4f, G = %.4f, appe = %.4f, '
                            'flow = %.4f, ang = %.4f, anomaly = %.4f, normal = %.4f'
                            % (i + 1, max_epoch, j + 1, len(batch_idx),
                               curr_D_loss, curr_G_loss, curr_loss_appe,
                               curr_loss_opt, curr_loss_opt_ang,
                               curr_loss_appe_a, curr_loss_appe_n))

                    if np.isnan(curr_D_loss) or np.isnan(curr_G_loss) or np.isnan(curr_loss_appe) or np.isnan(
                            curr_loss_opt) \
                            or np.isnan(curr_loss_opt_ang):
                        return

                train_writer.add_summary(summary, i)
                train_writer.flush()  # flush the buffer so events are written to disk immediately

                # save the model checkpoint (ckpt) and loss records every 10 epochs
                if (i + 1) % 10 == 0:
                    saver.save(
                        sess, './training_saver/%s/model_ckpt_%d.ckpt' %
                        (dataset_name, i + 1))
                    scio.savemat(
                        './training_saver/%s/loss_anomaly_%d.mat' %
                        (dataset_name, i + 1), {'loss': loss_anomaly})
                    scio.savemat(
                        './training_saver/%s/loss_normal_%d.mat' %
                        (dataset_name, i + 1), {'loss': loss_normal})
                    scio.savemat(
                        './training_saver/%s/loss_recoder_%d.mat' %
                        (dataset_name, i + 1), {'loss': loss_recoder})
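generate_mask(h, w) supplies the mask that corrupts frames into pseudo-anomalies; a minimal placeholder under the assumption of a single randomly placed zeroed rectangle (the project's actual masking may differ):

import numpy as np

def generate_mask(h, w, max_frac=0.3):
    # hypothetical: ones everywhere except one randomly placed zero rectangle
    mask = np.ones((h, w), dtype=np.float32)
    mh = np.random.randint(1, max(2, int(h * max_frac)))
    mw = np.random.randint(1, max(2, int(w * max_frac)))
    y = np.random.randint(0, h - mh)
    x = np.random.randint(0, w - mw)
    mask[y:y + mh, x:x + mw] = 0.
    return mask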