Code Example #1
def extract(FLAG):
    video_path = FLAG.video_path
    video_list = getVideoList(FLAG.video_list)

    df = pd.DataFrame.from_dict(video_list)

    vgg16 = Extractor(shape=(240, 320, 3))
    vgg16.build(vgg16_npy_path=FLAG.init)

    load_extract_video(video_path=video_path,
                       df=df,
                       model=vgg16,
                       filename=os.path.join(FLAG.save_dir, FLAG.filename))
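
For context, a sketch of how extract() might be invoked, assuming FLAG is an argparse-style namespace. The attribute names mirror the ones the function reads above; the default values are placeholders, not the project's real paths.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--video_path', default='data/videos')         # directory of input videos
parser.add_argument('--video_list', default='data/video_list.csv')
parser.add_argument('--init', default='vgg16.npy')                 # pretrained VGG16 weights (.npy)
parser.add_argument('--save_dir', default='features')
parser.add_argument('--filename', default='features.pkl')
FLAG = parser.parse_args()

extract(FLAG)
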
Code Example #2
File: main.py  Project: sayano-lee/mmc_ocr
def filter():
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    dataset = ImageFolder(root=args.icdar_patches, transform=transform)
    loader = torch.utils.data.DataLoader(dataset, batch_size=10, shuffle=True)

    model = Extractor()
    for params in model.parameters():
        params.requires_grad = False
    acc = clustering(loader=loader, model=model)
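
Setting requires_grad = False keeps the extractor fixed while clustering. An equivalent inference-only sketch, using the same loader and Extractor defined above, that also switches to eval mode and disables autograd tracking:

model = Extractor()
model.eval()                    # fix batch-norm/dropout behavior at inference
with torch.no_grad():           # skip autograd bookkeeping entirely
    for images, labels in loader:
        feats = model(images)   # deep features, e.g. as clustering input
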
Code Example #3
File: dann.py  Project: JinSakuma/pytorch_DANN
    def _build_models(self):
        gpu_flag = torch.cuda.is_available()

        extractor = Extractor()
        classifier = Classifier()
        discriminator = Discriminator()

        if gpu_flag:
            self.device = torch.device('cuda:0')
        else:
            self.device = torch.device('cpu')

        self.extractor = extractor.to(self.device)
        self.classifier = classifier.to(self.device)
        self.discriminator = discriminator.to(self.device)
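
For reference, a sketch of how the three DANN modules are typically wired in a forward pass; the call signatures here are assumptions, since the actual definitions live elsewhere in the project:

def forward(self, x):                             # hypothetical wiring
    features = self.extractor(x)                  # shared feature space
    class_logits = self.classifier(features)      # label prediction
    domain_logits = self.discriminator(features)  # source-vs-target prediction
    return class_logits, domain_logits
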
Code Example #4
    def __init__(self):
        super(Generator, self).__init__()

        weights = get_pretrained_weights(args.pretrained_wordembedding)
        self._embedding = WordEmbedding(args.embedding_size, weights, trainable=args.trainable_wordembed)

        self._senenc = ConvSentEncoder(args.embedding_size,
            args.sentembed_size,
            args.max_filter_length,
            args.dropout)

        self._docenc = DocEncoder(args.sentembed_size,
            args.size,
            args.num_layers,
            args.rnn_cell,
            args.dropout,
            args.bidirectional)

        if args.attn:
            self._docext = AttnExtractor(args.sentembed_size,
                args.size,
                args.num_layers,
                args.rnn_cell,
                coverage=args.coverage)
        else:
            self._docext = Extractor(args.sentembed_size,
                args.size,
                args.num_layers,
                args.rnn_cell)

        if args.aux_embedding:
            weights = get_pretrained_weights(args.pretrained_aux_wordembedding)
            self._aux_embedding = WordEmbedding(args.embedding_size, weights, trainable=args.trainable_wordembed)
            self._aux_senenc = ConvSentEncoder(args.embedding_size,
                args.sentembed_size,
                args.max_filter_length,
                args.dropout)
            self._linear = nn.Linear(args.sentembed_size*2, args.sentembed_size)
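
A hypothetical forward() showing how these components plausibly compose (the original does not show it): embeddings feed the convolutional sentence encoder, sentence vectors feed the document encoder, and the extractor scores sentences for selection.

def forward(self, docs):                        # illustrative only; signatures assumed
    embedded = self._embedding(docs)            # word vectors per sentence
    sent_vecs = self._senenc(embedded)          # one vector per sentence
    doc_state = self._docenc(sent_vecs)         # document-level encoding
    return self._docext(sent_vecs, doc_state)   # per-sentence extraction scores
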
Code Example #5
File: freq_test.py  Project: mingyr/watermarking
def main(unused_argv):
    if FLAGS.checkpoint_dir == '' or not os.path.exists(FLAGS.checkpoint_dir):
        raise ValueError('invalid checkpoint directory {}'.format(
            FLAGS.checkpoint_dir))

    checkpoint_dir = os.path.join(FLAGS.checkpoint_dir, '')

    if FLAGS.output_dir == '':
        raise ValueError('invalid output directory {}'.format(
            FLAGS.output_dir))
    elif not os.path.exists(FLAGS.output_dir):
        assert FLAGS.output_dir != FLAGS.checkpoint_dir
        os.makedirs(FLAGS.output_dir)

    print('reconstructing models and inputs.')
    image = Image('/data/yuming/watermark-data/image_paths.mat',
                  FLAGS.image_seq)()
    wm = Watermark('/data/yuming/watermark-data/watermark.mat')()

    dim = [1, FLAGS.img_height, FLAGS.img_width, FLAGS.num_chans]
    image_upsampler = Upsampler(dim)
    wm_upsampler = Upsampler([1] + dim[1:])
    downsampler = Downsampler(dim)
    blender = Blender(dim)
    extractor = Extractor(dim)

    image_upsampled = image_upsampler(image)
    wm_upsampled = wm_upsampler(wm)
    image_blended = blender(image_upsampled, wm_upsampled)
    image_downsampled = downsampler(image_blended)

    mask = Mask(FLAGS.img_height, FLAGS.img_width, 80)()
    mask = tf.cast(mask, tf.complex64)
    freqimage = FreqImage(mask)

    image_freqfiltered = freqimage(image_downsampled)
    wm_extracted = extractor(image_freqfiltered)

    enhance = Enhance(sharpen=True)
    wm_extracted = enhance(wm_extracted)

    saver = tf.train.Saver()
    writer = tf.summary.FileWriter(FLAGS.output_dir, tf.get_default_graph())

    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False)
    assert (FLAGS.gpus != ''), 'invalid GPU specification'
    config.gpu_options.visible_device_list = FLAGS.gpus

    with tf.Session(config=config) as sess:
        sess.run(tf.local_variables_initializer())

        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
            # Assuming model_checkpoint_path looks something like:
            #   /my-favorite-path/cifar10_train/model.ckpt-0,
            # extract global_step from it.
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                '-')[-1]
        else:
            print('No checkpoint file found')
            return

        wm_val, image_downsampled_val, image_freqfiltered_val, wm_extracted_val = \
            sess.run([wm, image_downsampled, image_freqfiltered, wm_extracted])

        images = [{
            'data':
            np.squeeze(image_downsampled_val[0, :, :, :].astype(np.uint8)),
            'title':
            "watermarked image"
        }, {
            'data':
            np.squeeze(image_freqfiltered_val[0, :, :, :].astype(np.uint8)),
            'title':
            "filtered image"
        }, {
            'data': np.squeeze(wm_val[0, :, :, :].astype(np.uint8)),
            'title': "original watermark"
        }, {
            'data':
            np.squeeze(wm_extracted_val[0, :, :, :].astype(np.uint8)),
            'title':
            "extracted watermark"
        }]

        image_str = draw_image(images)
        writer.add_summary(image_str, global_step=0)

        np.set_printoptions(threshold=sys.maxsize)
        print(np.squeeze(wm_extracted_val))

    writer.close()
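
The script reads several command-line flags; a sketch of tf.app.flags definitions consistent with the accesses above (names come from the code, defaults and types are illustrative):

import tensorflow as tf

tf.app.flags.DEFINE_string('checkpoint_dir', '', 'directory containing model.ckpt-*')
tf.app.flags.DEFINE_string('output_dir', '', 'where summaries are written')
tf.app.flags.DEFINE_integer('image_seq', 0, 'index of the test image (type assumed)')
tf.app.flags.DEFINE_integer('img_height', 240, 'input image height')
tf.app.flags.DEFINE_integer('img_width', 320, 'input image width')
tf.app.flags.DEFINE_integer('num_chans', 3, 'number of image channels')
tf.app.flags.DEFINE_string('gpus', '0', 'visible GPU device list')
FLAGS = tf.app.flags.FLAGS
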
Code Example #6
s3_loader_raw = torch.utils.data.DataLoader(s3_set,
                                            batch_size=batch_size,
                                            shuffle=shuffle,
                                            num_workers=num_workers,
                                            drop_last=True)
t_loader_raw = torch.utils.data.DataLoader(t_set,
                                           batch_size=batch_size,
                                           shuffle=shuffle,
                                           num_workers=num_workers,
                                           drop_last=True)
t_loader_test = torch.utils.data.DataLoader(t_set_test,
                                            batch_size=batch_size,
                                            shuffle=False,
                                            num_workers=num_workers)

extractor = Extractor()
s1_classifier = Classifier(num_classes=num_classes)
s2_classifier = Classifier(num_classes=num_classes)
s3_classifier = Classifier(num_classes=num_classes)
s1_t_discriminator = Discriminator()
s2_t_discriminator = Discriminator()
s3_t_discriminator = Discriminator()

extractor.load_state_dict(
    torch.load(
        osp.join(
            MAIN_DIR,
            "MSDA/A_W_2_D_Open/bvlc_A_W_2_D/pretrain/bvlc_extractor.pth")))
extractor = nn.DataParallel(extractor)
extractor = extractor.cuda()
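
One ordering detail worth noting: the checkpoint is loaded before nn.DataParallel wraps the module. After wrapping, the wrapper's state_dict keys gain a "module." prefix, so the same file would instead be loaded through the inner module, as in this sketch:

state = torch.load(osp.join(
    MAIN_DIR, "MSDA/A_W_2_D_Open/bvlc_A_W_2_D/pretrain/bvlc_extractor.pth"))
extractor.module.load_state_dict(state)  # .module reaches the wrapped network
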
Code Example #7
File: svm.py  Project: sayano-lee/mmc_ocr
        pbar.update(1)
        im, ann, im_fns = data[0], data[1], data[2]
        im = im.cuda()
        feat = model(im)
        x = feat.cpu().numpy()
        y = ann.numpy()

        precision = precision + clf.score(x, y)
    pbar.close()

    print("\nAverage Precision is {}".format(precision / len(loader)))


if __name__ == '__main__':

    icdar_patches = "./data/icdar2015/patches"

    dataset = ImageFolder(root=icdar_patches, transform=transforms)
    loader = torch.utils.data.DataLoader(dataset, batch_size=20, shuffle=True)

    # extractor for deep features
    model = Extractor()
    model = model.cuda()
    for params in model.parameters():
        params.requires_grad = False

    # vanilla svm
    clf = svm.SVC(kernel="rbf", gamma=10)

    train(loader=loader, model=model, clf=clf)
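
Note that ImageFolder above receives `transforms`, which looks like the torchvision module itself rather than a callable pipeline; a composed transform like the one in Code Example #2 (same project) is presumably intended:

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])
dataset = ImageFolder(root=icdar_patches, transform=transform)
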
Code Example #8
                                            shuffle=shuffle,
                                            num_workers=num_workers)
s2_loader_raw = torch.utils.data.DataLoader(s2_set,
                                            batch_size=batch_size,
                                            shuffle=shuffle,
                                            num_workers=num_workers)
t_loader_raw = torch.utils.data.DataLoader(t_set,
                                           batch_size=batch_size,
                                           shuffle=shuffle,
                                           num_workers=num_workers)
t_loader_test = torch.utils.data.DataLoader(t_set_test,
                                            batch_size=batch_size,
                                            shuffle=False,
                                            num_workers=num_workers)

extractor = Extractor().cpu()  #.cuda(gpu_id)
#extractor.load_state_dict(torch.load("/home/xuruijia/ZJY/ADW/bvlc_A_W_2_D/pretrain/bvlc_extractor.pth"))
s1_classifier = Classifier(num_classes=num_classes).cpu()  #.cuda(gpu_id)
s2_classifier = Classifier(num_classes=num_classes).cpu()  #.cuda(gpu_id)
#s1_classifier.load_state_dict(torch.load("/home/xuruijia/ZJY/ADW/bvlc_A_W_2_D/pretrain/bvlc_s1_cls.pth"))
#s2_classifier.load_state_dict(torch.load("/home/xuruijia/ZJY/ADW/bvlc_A_W_2_D/pretrain/bvlc_s2_cls.pth"))
s1_t_discriminator = Discriminator().cpu()  #.cuda(gpu_id)
s2_t_discriminator = Discriminator().cpu()  #.cuda(gpu_id)


def print_log(step, epoch, epoches, lr, l1, l2, l3, l4, l5, l6, l7, l8, flag,
              ploter, count):
    print("Step [%d/%d] Epoch [%d/%d] lr: %f, s1_cls_loss: %.4f, s2_cls_loss: %.4f, s1_t_dis_loss: %.4f, " \
          "s2_t_dis_loss: %.4f, s1_t_confusion_loss_s1: %.4f, s1_t_confusion_loss_t: %.4f, " \
          "s2_t_confusion_loss_s2: %.4f, s2_t_confusion_loss_t: %.4f, selected_source: %s" \
          % (step, steps, epoch, epoches, lr, l1, l2, l3, l4, l5, l6, l7, l8, flag))
Code Example #9
def main(unused_argv):
    summ = Summaries()

    if FLAGS.data_dir == '' or not os.path.exists(FLAGS.data_dir):
        raise ValueError('invalid data directory {}'.format(FLAGS.data_dir))

    if FLAGS.output_dir == '':
        raise ValueError('invalid output directory {}'.format(FLAGS.output_dir))
    elif not os.path.exists(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)   

    event_log_dir = os.path.join(FLAGS.output_dir, '')
    
    checkpoint_path = os.path.join(FLAGS.output_dir, 'model.ckpt')

    print('Constructing models.')

    dim = [FLAGS.batch_size, FLAGS.img_height, FLAGS.img_width, FLAGS.num_chans]
    image_upsampler = Upsampler(dim)
    wm_upsampler = Upsampler([1] + dim[1:])
    image_downsampler = Downsampler(dim)
    blender = Blender(dim)
    extractor = Extractor(dim)

    train_loss, train_op, train_summ_op = \
        train(FLAGS.data_dir, image_upsampler, wm_upsampler, blender, image_downsampler, extractor, summ)
    val_summ_op = val(FLAGS.data_dir, image_upsampler, wm_upsampler, blender, image_downsampler, extractor, summ)

    print('Constructing saver.')
    saver = tf.train.Saver()

    # Start running operations on the Graph. allow_soft_placement must be set to
    # True to as some of the ops do not have GPU implementations.
    config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)

    assert (FLAGS.gpus != ''), 'invalid GPU specification'
    config.gpu_options.visible_device_list = FLAGS.gpus

    # Build an initialization operation to run below.
    init = [tf.global_variables_initializer(), tf.local_variables_initializer()]

    with tf.Session(config=config) as sess:
        sess.run(init)

        writer = tf.summary.FileWriter(event_log_dir, graph=sess.graph)

        # Run training.
        for itr in range(FLAGS.num_iterations):
            cost, _, train_summ_str = sess.run([train_loss, train_op, train_summ_op])
            # Print info: iteration #, cost.
            print(str(itr) + ' ' + str(cost))

            writer.add_summary(train_summ_str, itr)

            if itr % FLAGS.validation_interval == 1:
                # Run through validation set.
                val_summ_str = sess.run(val_summ_op)
                writer.add_summary(val_summ_str, itr)

        tf.logging.info('Saving model.')
        saver.save(sess, checkpoint_path)
        tf.logging.info('Training complete')
Code Example #10
source_training_liquid_labels = source_training_liquid_labels / math.sqrt(1000)
source_validation_liquid_labels = source_validation_liquid_labels / math.sqrt(
    1000)
target_liquid_labels = target_liquid_labels / math.sqrt(1000)
length_source_training = source_training_gas_labels.size()[0]
length_source_validation = source_validation_gas_labels.size()[0]
length_target = target_gas_labels.size()[0]

#Parameters
Batch_size_s = int(length_source_training / 25) + 1
Batch_size_t = int(length_target / 25) + 1
num_epochs = 200
m = 1
n = 10**(-7)
E = Extractor()
D = Discriminator()
R = Regressor()
E.apply(weights_init)
D.apply(weights_init)
R.apply(weights_init)
e_learning_rate = 0.00003
d_learning_rate = 0.00015
r_learning_rate = 0.0000001
e_optimizer = optim.RMSprop(E.parameters(), lr=e_learning_rate, alpha=0.9)
d_optimizer = optim.RMSprop(D.parameters(), lr=d_learning_rate, alpha=0.9)
r_optimizer = optim.RMSprop(R.parameters(), lr=r_learning_rate, alpha=0.9)
e_steps = 1
d_steps = 1
r_steps = 1
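
The weights_init helper applied to E, D, and R is not shown; a typical definition it presumably resembles (an assumption, not the project's actual code):

import torch.nn as nn

def weights_init(m):  # assumed initializer, for illustration only
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)
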
Code Example #11
    shuffle=shuffle, num_workers=num_workers)
s2_loader_raw = torch.utils.data.DataLoader(s2_set, batch_size=batch_size,
    shuffle=shuffle, num_workers=num_workers)
t_loader_raw = torch.utils.data.DataLoader(t_set, batch_size=batch_size,
    shuffle=shuffle, num_workers=num_workers)
t_loader_test = torch.utils.data.DataLoader(t_set_test, batch_size=batch_size,
    shuffle=False, num_workers=num_workers)

s1_loader_raw1 = torch.utils.data.DataLoader(s1_set, batch_size=1,
    shuffle=shuffle, pin_memory=True)
s2_loader_raw1 = torch.utils.data.DataLoader(s2_set, batch_size=1,
    shuffle=shuffle, pin_memory=True)
t_loader_raw1 = torch.utils.data.DataLoader(t_set, batch_size=1,
    shuffle=shuffle,pin_memory=True)

extractor = Extractor().cpu()
extractor.load_state_dict(torch.load("/Users/bytedabce/PycharmProjects/mix_net/train_eval/pre_train_model/bvlc_extractor.pth"))
s1_classifier = Classifier(num_classes=num_classes).cpu()
s2_classifier = Classifier(num_classes=num_classes).cpu()
s1_classifier.load_state_dict(torch.load("/Users/bytedabce/PycharmProjects/mix_net/train_eval/pre_train_model/bvlc_s1_cls.pth"))
s2_classifier.load_state_dict(torch.load("/Users/bytedabce/PycharmProjects/mix_net/train_eval/pre_train_model/bvlc_s2_cls.pth"))
s1_t_discriminator = Discriminator().cpu()
s2_t_discriminator = Discriminator().cpu()




def print_log(step, epoch, epoches, lr, l1, l2, l3, l4, l5, l6, l7, l8, flag, ploter, count):
    print("Step [%d/%d] Epoch [%d/%d] lr: %f, s1_cls_loss: %.4f, s2_cls_loss: %.4f, s1_t_dis_loss: %.4f, " \
          "s2_t_dis_loss: %.4f, s1_t_confusion_loss_s1: %.4f, s1_t_confusion_loss_t: %.4f, " \
          "s2_t_confusion_loss_s2: %.4f, s2_t_confusion_loss_t: %.4f, selected_source: %s" \
          % (step, steps, epoch, epoches, lr, l1, l2, l3, l4, l5, l6, l7, l8, flag))  # 'steps' is a module-level global, as in Code Example #8
Code Example #12
File: train_eval.py  Project: xurj3/MSDA
                                            shuffle=shuffle,
                                            num_workers=num_workers)
s2_loader_raw = torch.utils.data.DataLoader(s2_set,
                                            batch_size=batch_size,
                                            shuffle=shuffle,
                                            num_workers=num_workers)
t_loader_raw = torch.utils.data.DataLoader(t_set,
                                           batch_size=batch_size,
                                           shuffle=shuffle,
                                           num_workers=num_workers)
t_loader_test = torch.utils.data.DataLoader(t_set_test,
                                            batch_size=batch_size,
                                            shuffle=False,
                                            num_workers=num_workers)

extractor = Extractor().cuda(gpu_id)
extractor.load_state_dict(
    torch.load(
        "/home/xuruijia/ZJY/ADW/bvlc_A_W_2_D/pretrain/bvlc_extractor.pth"))
s1_classifier = Classifier(num_classes=num_classes).cuda(gpu_id)
s2_classifier = Classifier(num_classes=num_classes).cuda(gpu_id)
s1_classifier.load_state_dict(
    torch.load("/home/xuruijia/ZJY/ADW/bvlc_A_W_2_D/pretrain/bvlc_s1_cls.pth"))
s2_classifier.load_state_dict(
    torch.load("/home/xuruijia/ZJY/ADW/bvlc_A_W_2_D/pretrain/bvlc_s2_cls.pth"))
s1_t_discriminator = Discriminator().cuda(gpu_id)
s2_t_discriminator = Discriminator().cuda(gpu_id)


def print_log(step, epoch, epoches, lr, l1, l2, l3, l4, l5, l6, l7, l8, flag,
              ploter, count):
    print("Step [%d/%d] Epoch [%d/%d] lr: %f, s1_cls_loss: %.4f, s2_cls_loss: %.4f, s1_t_dis_loss: %.4f, " \
          "s2_t_dis_loss: %.4f, s1_t_confusion_loss_s1: %.4f, s1_t_confusion_loss_t: %.4f, " \
          "s2_t_confusion_loss_s2: %.4f, s2_t_confusion_loss_t: %.4f, selected_source: %s" \
          % (step, steps, epoch, epoches, lr, l1, l2, l3, l4, l5, l6, l7, l8, flag))  # body restored from the identical function in Code Example #8
Code Example #13
                                            shuffle=shuffle,
                                            num_workers=num_workers)
s2_loader_raw = torch.utils.data.DataLoader(s2_set,
                                            batch_size=batch_size,
                                            shuffle=shuffle,
                                            num_workers=num_workers)
t_loader_raw = torch.utils.data.DataLoader(t_set,
                                           batch_size=batch_size,
                                           shuffle=shuffle,
                                           num_workers=num_workers)
t_loader_test = torch.utils.data.DataLoader(t_set_test,
                                            batch_size=batch_size,
                                            shuffle=False,
                                            num_workers=num_workers)

extractor = Extractor()
s1_classifier = Classifier(num_classes=num_classes)
s2_classifier = Classifier(num_classes=num_classes)
s1_t_discriminator = Discriminator()
s2_t_discriminator = Discriminator()

extractor.load_state_dict(
    torch.load(osp.join(MAIN_DIR, "MSDA/pretrain/office/bvlc_extractor.pth")))
extractor = nn.DataParallel(extractor)
extractor = extractor.cuda()

s1_classifier.load_state_dict(
    torch.load(osp.join(MAIN_DIR, "MSDA/pretrain/office/bvlc_s1_cls.pth")))
s2_classifier.load_state_dict(
    torch.load(osp.join(MAIN_DIR, "MSDA/pretrain/office/bvlc_s2_cls.pth")))
s1_classifier = nn.DataParallel(s1_classifier)
Code Example #14
source_validation_features, source_validation_gas_labels, source_validation_liquid_labels = \
    info_transfer2FloatTensor(source_validation_features, source_validation_labels)
target_features, target_gas_labels, target_liquid_labels = \
    info_transfer2FloatTensor(target_features, target_labels)

source_training_liquid_labels = source_training_liquid_labels / math.sqrt(1000)
source_validation_liquid_labels = source_validation_liquid_labels / math.sqrt(1000)
target_liquid_labels = target_liquid_labels / math.sqrt(1000)
length_target = target_gas_labels.size()[0]

target_dataset = Data.TensorDataset(target_features, target_liquid_labels)
Batch_size = 128
target_loader = Data.DataLoader(dataset=target_dataset, batch_size=Batch_size,
                                shuffle=False, num_workers=2)

E = Extractor()
R = Regressor()
E.load_state_dict(torch.load('E_l2.pkl'))
R.load_state_dict(torch.load('R_l2.pkl'))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
E.to(device)
R.to(device)
prediction_target, reference_target = predict(target_loader, E, R)

#plot
standard_line_x = [0, 6500]
standard_line_y = [0, 6500]
error_line_x_p500 = [0, 6000]
error_line_y_p500 = [500, 6500]
error_line_x_n500 = [500, 6500]
error_line_y_n500 = [0, 6000]
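
These endpoints describe a parity line (y = x) and ±500 error bands over [0, 6500]; a minimal matplotlib sketch of the plot they suggest (the actual plotting code is not shown, so this is illustrative):

import matplotlib.pyplot as plt

plt.scatter(reference_target, prediction_target, s=8)            # predicted vs. reference
plt.plot(standard_line_x, standard_line_y, 'k-', label='y = x')
plt.plot(error_line_x_p500, error_line_y_p500, 'r--', label='+500 band')
plt.plot(error_line_x_n500, error_line_y_n500, 'r--', label='-500 band')
plt.xlabel('reference')
plt.ylabel('prediction')
plt.legend()
plt.show()
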
Code Example #15
File: psnr.py  Project: mingyr/watermarking
def main(unused_argv):
    if FLAGS.checkpoint_dir == '' or not os.path.exists(FLAGS.checkpoint_dir):
        raise ValueError('invalid checkpoint directory {}'.format(FLAGS.checkpoint_dir))

    checkpoint_dir = os.path.join(FLAGS.checkpoint_dir, '')

    if FLAGS.output_dir == '':
        raise ValueError('invalid output directory {}'.format(FLAGS.output_dir))
    elif not os.path.exists(FLAGS.output_dir):
        assert FLAGS.output_dir != FLAGS.checkpoint_dir
        os.makedirs(FLAGS.output_dir)

    print('reconstructing models and inputs.')
    image = Image('/data/yuming/watermark-data/image_paths.mat', FLAGS.image_seq)()
    wm = Watermark('/data/yuming/watermark-data/watermark.mat')()

    dim = [1, FLAGS.img_height, FLAGS.img_width, FLAGS.num_chans]
    image_upsampler = Upsampler(dim)
    wm_upsampler = Upsampler([1] + dim[1:])
    downsampler = Downsampler(dim)
    blender = Blender(dim)
    extractor = Extractor(dim)

    image_upsampled = image_upsampler(image)
    wm_upsampled = wm_upsampler(wm)
    image_blended = blender(image_upsampled, wm_upsampled)
    image_downsampled = downsampler(image_blended)
    wm_extracted = extractor(image_downsampled)

    # Calculate the psnr of the model.
    psnr = PSNR()
    image_psnr = psnr(image, image_downsampled)
    wm_psnr = psnr(wm, wm_extracted)
    
    summ_psnr_op = tf.summary.merge([tf.summary.text('image_psnr', tf.as_string(image_psnr)),
                                     tf.summary.text('wm_psnr', tf.as_string(wm_psnr))])
    saver = tf.train.Saver()
    writer = tf.summary.FileWriter(FLAGS.output_dir, tf.get_default_graph()) 
    
    config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    assert (FLAGS.gpus != ''), 'invalid GPU specification'
    config.gpu_options.visible_device_list = FLAGS.gpus

    with tf.Session(config=config) as sess:
        sess.run(tf.local_variables_initializer())

        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
            # Assuming model_checkpoint_path looks something like:
            #   /my-favorite-path/cifar10_train/model.ckpt-0,
            # extract global_step from it.
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        else:
            print('No checkpoint file found')
            return

        summ_psnr_str, image_val, image_downsampled_val = \
            sess.run([summ_psnr_op, image, image_downsampled])

        writer.add_summary(summ_psnr_str, global_step=0)

        '''
        images = [{'data': np.squeeze(image_val[0, :, :, :].astype(np.uint8)), 'title': "original image"},
                  {'data': np.squeeze(image_downsampled_val[0, :, :, :].astype(np.uint8)), 'title': "watermarked image"}]
        '''

        images = [{'data': np.squeeze(image_val[0, :, :, :].astype(np.uint8)), 'title': ""},
                  {'data': np.squeeze(image_downsampled_val[0, :, :, :].astype(np.uint8)), 'title': ""}]

        
        image_str = draw_image(images)
        writer.add_summary(image_str, global_step=0)