コード例 #1
0
def build_model(
    frames,
    class_num,
    dropout_keep_prob,
    reuse,
    training,
):
    """Build the I3D classifier together with a video reconstructor.

    Args:
        frames: Input video tensor fed to the I3D encoder.
        class_num: Number of output classes for the classifier head.
        dropout_keep_prob: Keep probability for dropout inside I3D.
        reuse: Passed to ``tf.variable_scope`` to share variables.
        training: Whether the graph is built in training mode.

    Returns:
        Tuple of (logits, predictions, reconstructed_video) where
        predictions are softmax probabilities over ``class_num`` classes.
    """
    with tf.variable_scope('RGB', reuse=reuse):
        encoder_model = InceptionI3d(class_num,
                                     spatial_squeeze=True,
                                     final_endpoint='Logits')
        logits, endpoints = encoder_model(frames,
                                          is_training=training,
                                          dropout_keep_prob=dropout_keep_prob)

        predictions = tf.nn.softmax(logits)

    # Reconstructor: rebuilds the video from the high-dimensional features
    # extracted by I3D (the 'Conv3d_2c_3x3' endpoint). It outputs a video
    # trained to be close to the original, but in reversed frame order.
    # (Was a free-floating string statement inside the `with` block -- a
    # no-op expression, not a docstring -- now a real comment.)
    with tf.variable_scope('Reconstructor', reuse=reuse):
        reconstructor = Reconstructor(training=training)
        reconstructed_video = reconstructor.reconstruct(
            endpoints['Conv3d_2c_3x3'], frames[:, -1])

    return logits, predictions, reconstructed_video
コード例 #2
0
def playground(params):
    """Load the trained describer/reconstructor/latent-forger triple and
    drop into an interactive IPython shell for manual experimentation.

    `params` supplies the speaker-categ path, the model-name header,
    and the reconstructor's log_frac setting.
    """
    speaker_categs = torch.load(params.speaker_categs_path)
    num_speakers, speaker_feature_dim = speaker_categs.size()

    def _snapshot(footer):
        # Read a saved state dict from snapshots/, mapped onto the CPU.
        return torch.load(
            'snapshots/' + params.header + footer + '.pth',
            map_location=lambda storage, loc: storage)

    describer = Describer(
        util.load_model(params.header + DESCRIBER_FOOTER),
        speaker_feature_dim)
    describer.eval()

    reconstructor = Reconstructor(
        util.load_model(params.header + RECONSTRUCTOR_FOOTER),
        params.log_frac)

    latent_forger = LatentForger(
        util.load_model(params.header + LATENT_FORGER_FOOTER))

    describer.load_state_dict(_snapshot(DESCRIBER_FOOTER))
    reconstructor.load_state_dict(_snapshot(RECONSTRUCTOR_FOOTER))
    latent_forger.load_state_dict(_snapshot(LATENT_FORGER_FOOTER))

    IPython.embed()
コード例 #3
0
 def reconstructDatabase(self):
     """Regenerate the original database from the recorded workload.

     Drives a Reconstructor over self.metadata_db / self.dataset_db,
     optionally wiping existing data first (self.clean), and logs
     summary statistics (extra detail when self.debug is set).
     """
     # Create a Reconstructor that will use the WORKLOAD_COL to regenerate
     # the original database and extract a schema catalog.
     r = Reconstructor(self.metadata_db, self.dataset_db)

     # Clear our existing data first when requested.
     if self.clean: r.clean()

     # Bombs away!
     r.process()
     LOG.info("Processed %d sessions with %d operations into '%s'" % (\
              r.getSessionCount(), r.getOpCount(), self.dataset_db.name))
     if self.debug:
         LOG.debug("Skipped Operations: %d" % r.getOpSkipCount())
         LOG.debug("Fixed Operations: %d" % r.getOpFixCount())
         LOG.debug("Collection Sizes:\n%s" % pformat(r.getCollectionCounts()))
コード例 #4
0
def StartReconstruction(datastore):
    """Run the reconstruction pipeline over one or more project folders.

    Args:
        datastore: Configuration object providing ini_path, root_path,
            recursive, steps (binary string) and matching_strategy.

    For each discovered project path, configures the Reconstructor's
    root directory, decodes the step list, optionally injects the
    matching strategy into the matcher step, and executes every step.
    """
    reconstructor = Reconstructor()
    project_ini = reconstructor.load_project_ini(
        path.realpath(datastore.ini_path))
    reconstructor.project_ini = project_ini

    rp = path.realpath(datastore.root_path)
    # Truthiness test instead of `== True` (assumes `recursive` is a bool,
    # e.g. an argparse store_true flag -- confirm against the caller).
    if datastore.recursive:
        filemanager = FileManager(rp)
    else:
        filemanager = [rp]

    for cnt, rel_path in enumerate(filemanager):
        current_project_path = path.join(
            datastore.root_path,
            rel_path) if datastore.root_path != "./" else "./"
        reconstructor.root_dir = current_project_path
        # str() instead of the non-idiomatic `.__str__()` call.
        print("#" + str(cnt + 1) + "# " + "Working on " +
              current_project_path)
        print(datastore.steps)
        steps = Steps.fromBinaryString(datastore.steps)

        if Steps.matcher in steps:
            matcher_index = steps.index(Steps.matcher)
            steps[matcher_index].matching_strategy = MatchingStrategy[
                datastore.matching_strategy]

        # Plain loop: the steps are executed purely for their side effects,
        # so a list comprehension (which built a throwaway list) was wrong.
        for step in steps:
            reconstructor.execute_step(step)
コード例 #5
0
# Load the convolution options pickled by an earlier preprocessing step.
with open(os.path.join(SOURCE_DATA_PATH, "conv_options.pkl"), 'rb') as f:
    conv_options = pickle.load(f)

# Probe tensor used only to detect CUDA availability; the models below are
# moved to the GPU iff this tensor ends up on the GPU.
example_tensor = torch.tensor(0.0)
if torch.cuda.is_available():
    example_tensor = example_tensor.cuda()


# Restore the trained describer and switch it to inference mode.
describer = util.load_model(DESCRIBER_NAME)
describer = Describer(describer, CATEG_SIZE)
describer.load_state_dict(torch.load(DESCRIBER_SNAPSHOT_PATH))
describer.eval()

# Restore the trained reconstructor and switch it to inference mode.
reconstructor = util.load_model(RECONSTRUCTOR_NAME)
reconstructor = Reconstructor(reconstructor)
reconstructor.load_state_dict(torch.load(RECONSTRUCTOR_SNAPSHOT_PATH))
reconstructor.eval()

if example_tensor.is_cuda:
    describer = describer.cuda()
    reconstructor = reconstructor.cuda()


# Process each speaker's data in turn, loading per-speaker .npy arrays.
for speaker in range(SPEAKER_START_INDEX, NUM_SPEAKERS):
    path = os.path.join(
        SOURCE_DATA_PATH, "speech_" + str(speaker) + ".npy")
    speech = np.load(path)

    # Per-utterance size index for this speaker.
    # NOTE(review): the loop body appears truncated here -- this path is
    # built but not consumed within the visible lines.
    path = os.path.join(
        SOURCE_DATA_PATH, "sizes_" + str(speaker) + ".npy")
コード例 #6
0
	# Grow this job's share of events until the per-job split covers the
	# whole dataset (enclosing context not visible in this chunk).
	while ntotalevents-options.n_jobs*nanalysisevents > nanalysisevents and (options.n_jobs-1)*(nanalysisevents+1)<ntotalevents :
		nanalysisevents+=1
# Copy the subset of the tree this job will analyze into the scratch file.
garbageFile.cd()
analysisTree = chain.CopyTree('','',nanalysisevents,options.i_job*nanalysisevents); analysisTree.SetDirectory(garbageFile) 
nanalysisevents = analysisTree.GetEntries() 
print 'number of analyzed events for this job = %d'%(nanalysisevents) 
# Build the output filename for the analyzer from the sample name plus
# optional JEC variation and job-index suffixes.
filename = options.name
if options.jec != 'nominal' :
	filename+='_'+options.jec
if options.n_jobs>1 :
	filename+='_'+str(options.i_job)
filename+='_tree.root'
# Initialize the analyzer over the copied subtree.
analyzer = Reconstructor(filename, analysisTree, data, options.xSec, options.kFac, options.jec, options.on_grid, total_pileup_histo, totweight, renormalization_dict) 

# Event counter for the main loop below.
count = 0

# If there is no max_events argument, analyze every copied event.
maxEvents = options.max_events if options.max_events!=-1 else nanalysisevents
setupDoneTime = time(); setupTime = timedelta(seconds=setupDoneTime-startTime)
print 'Done Setting up; time taken: %02d:%02d:%02d'%(setupTime.seconds/3600,(setupTime.seconds%3600)/60,(setupTime.seconds%60))

##########								Main Event Loop								##########
# NOTE(review): Python 2 script (print statements); the loop body is
# truncated at the end of this chunk.
print 'Files opened, starting event loop'
for event in range(nanalysisevents) : 
	count+=1
	# Stop once the configured maximum number of events is exceeded.
	if count == maxEvents+1 :
コード例 #7
0
    def reconstructDatabase(self):
        """Regenerate the original database from the recorded workload.

        A Reconstructor driven by the workload collection rebuilds the
        dataset and extracts a schema catalog; existing data is wiped
        first when self.clean is set, and extra statistics are logged
        when self.debug is set.
        """
        reconstructor = Reconstructor(self.metadata_db, self.dataset_db)

        # Wipe any previously reconstructed data before starting over.
        if self.clean:
            reconstructor.clean()

        # Run the full reconstruction pass.
        reconstructor.process()
        LOG.info("Processed %d sessions with %d operations into '%s'" % (
            reconstructor.getSessionCount(),
            reconstructor.getOpCount(),
            self.dataset_db.name))

        if self.debug:
            LOG.debug("Skipped Operations: %d" % reconstructor.getOpSkipCount())
            LOG.debug("Fixed Operations: %d" % reconstructor.getOpFixCount())
            LOG.debug("Collection Sizes:\n%s" %
                      pformat(reconstructor.getCollectionCounts()))
コード例 #8
0
    # NOTE(review): interior of a larger function -- its signature and the
    # definitions of drawer/selector/cam/pts1/pts2/etc. are outside this view.
    drawer.drawFlow(pts1, pts2, 'goodEnough.jpg', img2)
    outlierer = Outlierer()
    visOdom = VisOdometry(cam=cam, ptsold=pts2, ptsnew=pts1)

    # Cluster points flagged as outliers by optical-flow magnitude.
    out1, out2 = outlierer.optFLowMagnOutliers(pts1, pts2)  # new, old
    clst1 = Cluster(out2, out1)

    # Cluster points flagged as outliers by the essential-matrix check.
    # NOTE(review): both unionClustToRect calls pass the same argument twice,
    # and the second call uses clst1's method with clst2's points -- looks
    # suspicious; confirm against the Cluster API before relying on this.
    clst2 = Cluster(visOdom.outold, visOdom.outnew)
    rects = unionRects(
        clst1.unionClustToRect(clst1.clstnewpts, clst1.clstnewpts),
        clst1.unionClustToRect(clst2.clstnewpts, clst2.clstnewpts))

    # 3D reconstruction from the recovered camera pose.
    reconst = Reconstructor(cam, visOdom.R, visOdom.t)

    scaleFactor = calcScaleFactor(SCALE, scaleFactor, reconst)
    SCALE = True

    if (RECONSTRUCT):
        # Build a point cloud of the road surface and project road points
        # back into both images.
        road1, road2 = selector.getPtsOnRoad()
        road_plane, pts = reconst.pointCloud(road1, road2, PLOT)
        road_pts1, road_pts2 = reconst.pointsOnRoad(selector.optFlow.img1,
                                                    selector.optFlow.img2,
                                                    road_plane)
        #...

    # Keep the detected rectangles; the else-branch body is truncated here.
    if (len(rects) != 0):
        storage = rects
    else:
コード例 #9
0
def train_analysts(params):
    """Jointly train the describer and reconstructor ("analysts") against
    a pair discriminator on speech features.

    Relies on module-level names defined elsewhere in this file:
    `example_tensor` (CUDA probe tensor), the *_FOOTER constants, and the
    util/model helpers. Saves describer and reconstructor snapshots under
    snapshots/ at the end of every period.

    Args:
        params: Config with speaker_categs_path, header, log_frac, lr,
            advers_lr, data_path, num_periods, period_size, batch_size,
            categ_term, robustness_term and advers_term.
    """
    speaker_categs = torch.load(params.speaker_categs_path)
    speaker_feature_dim = speaker_categs.size()[1]

    # Fresh (randomly initialized) analyst models.
    describer_model = util.load_model(params.header + DESCRIBER_FOOTER)
    describer = Describer(describer_model, speaker_feature_dim)
    util.initialize(describer)

    reconstructor_model = util.load_model(params.header + RECONSTRUCTOR_FOOTER)
    reconstructor = Reconstructor(reconstructor_model, params.log_frac)
    util.initialize(reconstructor)

    # Adversary: decides which of a (real, reconstructed) pair is which.
    examiner_model = util.load_model(params.header + EXAMINER_FOOTER)
    distinguisher_model = util.load_model(params.header + DISTINGUISHER_FOOTER)
    discriminator = PairDiscriminator(examiner_model, distinguisher_model)
    util.initialize(discriminator)
    discriminator.train()

    # Move everything to the GPU iff the module-level probe tensor is there.
    if example_tensor.is_cuda:
        speaker_categs = speaker_categs.cuda()
        describer = describer.cuda()
        reconstructor = reconstructor.cuda()
        discriminator = discriminator.cuda()

    # Sequential is used only as a convenient container to chain the two
    # modules' parameter iterators into one optimizer.
    optim = torch.optim.Adam(
        torch.nn.Sequential(describer, reconstructor).parameters(),
        lr=params.lr)
    advers_optim = torch.optim.Adam(
        discriminator.parameters(),
        lr=params.advers_lr)

    data_loader = VCTKLoader(
        params.data_path, example_tensor, features='log')
    data_iterator = iter(data_loader)

    # Per-batch running sums, reported and reset every params.batch_size
    # samples.
    categ_loss_sum_batch = 0.0
    robustness_loss_sum_batch = 0.0
    reconst_loss_sum_batch = 0.0
    gen_loss_batch = 0.0
    advers_loss_batch = 0.0

    # These two accumulate tensors (not floats) so a single backward pass
    # can be taken on the batch mean.
    loss_sum_batch = 0.0
    discrim_loss_sum_batch = 0.0
    num_in_batch = 0

    period = 0
    while period < params.num_periods:
        period += 1

        loss_sum = 0.0
        loss_count = 0

        print(util.COMMENT_HEADER, end='')

        for _ in range(params.period_size):
            orig, orig_speaker = next(data_iterator)
            orig_categ = speaker_categs[orig_speaker].unsqueeze(0)

            # Inference-mode category prediction, detached, used as a
            # stability target for the robustness loss below.
            describer.eval()
            center_categ = describer.categ(orig).detach()
            describer.train()

            (latent, metadata, pred_categ) = describer.describe(orig)
            reconst = reconstructor.reconst(latent, metadata)

            # Coin flip: randomize which side of the pair holds the real
            # sample so the discriminator cannot exploit ordering.
            truth = 0 if (np.random.random() < 0.5) else 1

            if truth == 0:
                decision = discriminator.discriminate(
                    orig_categ, orig, reconst)
            else:
                decision = discriminator.discriminate(
                    orig_categ, reconst, orig)

            categ_loss = describer.categ_loss(pred_categ, orig_categ)
            robustness_loss = describer.categ_loss(pred_categ, center_categ)
            reconst_loss = reconstructor.reconst_loss(reconst, orig)
            gen_loss = discriminator.gen_loss(decision, truth)
            advers_loss = discriminator.advers_loss(decision, truth)

            # Analyst objective: weighted classification + robustness +
            # reconstruction + adversarial (fooling) terms.
            loss = (
                params.categ_term * categ_loss +
                params.robustness_term * robustness_loss +
                reconst_loss +
                params.advers_term * gen_loss)
            discrim_loss = advers_loss

            categ_loss_sum_batch += categ_loss.item()
            robustness_loss_sum_batch += robustness_loss.item()
            reconst_loss_sum_batch += reconst_loss.item()
            gen_loss_batch += gen_loss.item()
            advers_loss_batch += advers_loss.item()

            loss_sum_batch = loss_sum_batch + loss
            discrim_loss_sum_batch = discrim_loss_sum_batch + discrim_loss
            num_in_batch += 1

            if num_in_batch >= params.batch_size:
                # Step the discriminator only while the generator loss is
                # not already large (<= 10.0), to keep the adversary from
                # running away. retain_graph=True because mean_loss below
                # backpropagates through the same graph.
                mean_discrim_loss = discrim_loss_sum_batch / num_in_batch
                if gen_loss_batch / num_in_batch <= 10.0:
                    advers_optim.zero_grad()
                    mean_discrim_loss.backward(retain_graph=True)
                    advers_optim.step()

                mean_loss = loss_sum_batch / num_in_batch
                optim.zero_grad()
                mean_loss.backward()
                optim.step()

                # Progress line: (categ|robustness|reconst|gen|advers).
                print("(" + "|".join([
                    "%0.3f" % (categ_loss_sum_batch / num_in_batch),
                    "%0.3f" % (robustness_loss_sum_batch / num_in_batch),
                    "%0.3f" % (reconst_loss_sum_batch / num_in_batch),
                    "%0.3f" % (gen_loss_batch / num_in_batch),
                    "%0.3f" % (advers_loss_batch / num_in_batch)]) + ")",
                    end=' ', flush=True)

                categ_loss_sum_batch = 0.0
                robustness_loss_sum_batch = 0.0
                reconst_loss_sum_batch = 0.0
                gen_loss_batch = 0.0
                advers_loss_batch = 0.0

                loss_sum_batch = 0.0
                discrim_loss_sum_batch = 0.0
                num_in_batch = 0

            loss_sum += loss.item()
            loss_count += 1

        print('')
        loss_mean = loss_sum / loss_count

        metrics = [
            ('period', period),
            ('loss', round(loss_mean, 3))
        ]
        util.print_metrics(metrics)

        # Snapshot both analyst models after every period.
        torch.save(
            describer.state_dict(),
            'snapshots/' + params.header + DESCRIBER_FOOTER + '.pth')

        torch.save(
            reconstructor.state_dict(),
            'snapshots/' + params.header + RECONSTRUCTOR_FOOTER + '.pth')
コード例 #10
0
def train_manipulators(params):
    """Train the latent forger ("manipulator") to convert a speaker's
    latent code toward a target speaker, against a pair discriminator.

    The describer and reconstructor are loaded from snapshots and used as
    fixed analysts (the describer in eval mode); only the latent forger
    and the discriminator are optimized. Relies on the module-level
    `example_tensor` probe and the *_FOOTER constants. Saves a latent
    forger snapshot under snapshots/ at the end of every period.

    Args:
        params: Config with speaker_categs_path, header, log_frac, lr,
            advers_lr, data_path, num_periods, period_size, batch_size,
            categ_term, activity_term and advers_term.
    """
    speaker_categs = torch.load(params.speaker_categs_path)
    num_speakers, speaker_feature_dim = speaker_categs.size()

    # Pretrained describer, frozen in eval mode.
    describer_model = util.load_model(params.header + DESCRIBER_FOOTER)
    describer = Describer(describer_model, speaker_feature_dim)
    describer.load_state_dict(torch.load(
        'snapshots/' + params.header + DESCRIBER_FOOTER + '.pth'))
    describer.eval()

    # Pretrained reconstructor.
    reconstructor_model = util.load_model(params.header + RECONSTRUCTOR_FOOTER)
    reconstructor = Reconstructor(reconstructor_model, params.log_frac)
    reconstructor.load_state_dict(torch.load(
        'snapshots/' + params.header + RECONSTRUCTOR_FOOTER + '.pth'))

    # Latent forger resumes from its (pre)trained snapshot.
    latent_forger_model = util.load_model(params.header + LATENT_FORGER_FOOTER)
    latent_forger = LatentForger(latent_forger_model)
    latent_forger.load_state_dict(torch.load(
        'snapshots/' + params.header + LATENT_FORGER_FOOTER + '.pth'))
    latent_forger.train()

    # Fresh adversary over (reconstruction, forgery) pairs.
    examiner_model = util.load_model(params.header + EXAMINER_FOOTER)
    distinguisher_model = util.load_model(params.header + DISTINGUISHER_FOOTER)
    discriminator = PairDiscriminator(examiner_model, distinguisher_model)
    util.initialize(discriminator)
    discriminator.train()

    if example_tensor.is_cuda:
        speaker_categs = speaker_categs.cuda()
        describer = describer.cuda()
        reconstructor = reconstructor.cuda()
        latent_forger = latent_forger.cuda()
        discriminator = discriminator.cuda()

    # Only the forger's parameters are trained by the main optimizer.
    optim = torch.optim.Adam(
        latent_forger.parameters(),
        lr=params.lr)
    advers_optim = torch.optim.Adam(
        discriminator.parameters(),
        lr=params.advers_lr)

    data_loader = VCTKLoader(
        params.data_path, example_tensor, features='log')
    data_iterator = iter(data_loader)

    # Per-batch running sums, reported and reset every params.batch_size
    # samples.
    forgery_categ_loss_sum_batch = 0.0
    activity_loss_sum_batch = 0.0
    pretend_latent_loss_sum_batch = 0.0
    pretend_reconst_loss_sum_batch = 0.0
    gen_loss_batch = 0.0
    advers_loss_batch = 0.0

    # Tensor accumulators: one backward pass is taken on the batch mean.
    loss_sum_batch = 0.0
    discrim_loss_sum_batch = 0.0
    num_in_batch = 0

    period = 0
    while period < params.num_periods:
        period += 1

        loss_sum = 0.0
        loss_count = 0

        print(util.COMMENT_HEADER, end='')

        for _ in range(params.period_size):
            # Two independent samples: the source and the target speaker.
            orig, orig_speaker = next(data_iterator)
            target, target_speaker = next(data_iterator)

            orig_categ = speaker_categs[orig_speaker].unsqueeze(0)
            target_categ = speaker_categs[target_speaker].unsqueeze(0)

            # Reference reconstruction of the target (detached: the
            # analysts are not trained here).
            target_latent, metadata = describer.latent(target)
            target_reconst = reconstructor.reconst(target_latent, metadata)
            target_reconst = target_reconst.detach()

            orig_latent, metadata = describer.latent(orig)
            orig_latent = orig_latent.detach()

            # Forge: shift the source latent toward the target speaker,
            # then decode it into a forged sample.
            forgery_latent_raw = latent_forger.modify_latent(
                orig_latent, orig_categ, target_categ)
            forgery = reconstructor.reconst(forgery_latent_raw, metadata)

            # Re-describe the forgery to check the speaker it sounds like.
            (forgery_latent, metadata, pred_forgery_categ) = \
                describer.describe(forgery)

            # Mean energy per frame (features are log-scale, hence exp).
            activity_orig = torch.exp(orig).mean(dim=1)
            activity_forgery = torch.exp(forgery).mean(dim=1)

            # Cycle: forge the forgery back toward the source speaker; it
            # should land on the original latent/sample.
            pretend_latent = latent_forger.modify_latent(
                forgery_latent, target_categ, orig_categ)
            pretend_reconst = reconstructor.reconst(pretend_latent, metadata)

            # Coin flip randomizes pair order for the discriminator.
            truth = 0 if (np.random.random() < 0.5) else 1

            if truth == 0:
                decision = discriminator.discriminate(
                    target_categ, target_reconst, forgery)
            else:
                decision = discriminator.discriminate(
                    target_categ, forgery, target_reconst)

            forgery_categ_loss = describer.categ_loss(
                pred_forgery_categ, target_categ)
            activity_loss = ((activity_orig - activity_forgery) ** 2).mean(
                dim=list(range(activity_orig.dim())))
            pretend_latent_loss = describer.latent_loss(
                pretend_latent, orig_latent)
            pretend_reconst_loss = reconstructor.reconst_loss(
                pretend_reconst, orig)
            gen_loss = discriminator.gen_loss(decision, truth)
            advers_loss = discriminator.advers_loss(decision, truth)

            # Forger objective: target-speaker classification + energy
            # preservation + cycle consistency + adversarial terms.
            loss = (params.categ_term * forgery_categ_loss +
                    params.activity_term * activity_loss +
                    pretend_latent_loss +
                    pretend_reconst_loss +
                    params.advers_term * gen_loss)
            discrim_loss = advers_loss

            forgery_categ_loss_sum_batch += forgery_categ_loss.item()
            activity_loss_sum_batch += activity_loss.item()
            pretend_latent_loss_sum_batch += pretend_latent_loss.item()
            pretend_reconst_loss_sum_batch += pretend_reconst_loss.item()
            gen_loss_batch += gen_loss.item()
            advers_loss_batch += advers_loss.item()

            loss_sum_batch = loss_sum_batch + loss
            discrim_loss_sum_batch = discrim_loss_sum_batch + discrim_loss
            num_in_batch += 1

            if num_in_batch >= params.batch_size:
                # Step the discriminator only while the generator loss is
                # not already large (<= 10.0). retain_graph=True because
                # mean_loss below backpropagates through the same graph.
                mean_discrim_loss = discrim_loss_sum_batch / num_in_batch
                if gen_loss_batch / num_in_batch <= 10.0:
                    advers_optim.zero_grad()
                    mean_discrim_loss.backward(retain_graph=True)
                    advers_optim.step()

                mean_loss = loss_sum_batch / num_in_batch
                optim.zero_grad()
                mean_loss.backward()
                # NOTE(review): `period >= 1` is always true (period starts
                # at 1) -- possibly a leftover warm-up gate; confirm intent.
                if period >= 1:
                    optim.step()

                # Progress line:
                # (categ|activity|pretend_latent|pretend_reconst|gen|advers).
                print("(" + "|".join([
                    "%0.3f" % (forgery_categ_loss_sum_batch / num_in_batch),
                    "%0.3f" % (activity_loss_sum_batch / num_in_batch),
                    "%0.3f" % (pretend_latent_loss_sum_batch / num_in_batch),
                    "%0.3f" % (pretend_reconst_loss_sum_batch / num_in_batch),
                    "%0.3f" % (gen_loss_batch / num_in_batch),
                    "%0.3f" % (advers_loss_batch / num_in_batch)
                ]) + ")", end=' ', flush=True)

                forgery_categ_loss_sum_batch = 0.0
                activity_loss_sum_batch = 0.0
                pretend_latent_loss_sum_batch = 0.0
                pretend_reconst_loss_sum_batch = 0.0
                gen_loss_batch = 0.0
                advers_loss_batch = 0.0

                loss_sum_batch = 0.0
                discrim_loss_sum_batch = 0.0
                num_in_batch = 0

            loss_sum += loss.item()
            loss_count += 1

        print('')
        loss_mean = loss_sum / loss_count

        metrics = [
            ('period', period),
            ('loss', round(loss_mean, 3))
        ]
        util.print_metrics(metrics)

        # Snapshot the forger after every period.
        torch.save(
            latent_forger.state_dict(),
            'snapshots/' + params.header + LATENT_FORGER_FOOTER + '.pth')
コード例 #11
0
def pretrain_manipulators(params):
    """Pretrain the latent forger on an identity-like task (no adversary).

    The forger is asked to map a source latent "back" to the source
    speaker from a randomly drawn forgery category; the result should
    match the original latent and its reconstruction. The describer and
    reconstructor are loaded from snapshots and kept fixed. Relies on
    the module-level `example_tensor` probe and *_FOOTER constants.
    Saves a forger snapshot under snapshots/ after every period.

    Args:
        params: Config with speaker_categs_path, header, log_frac, lr,
            data_path, num_periods, period_size and batch_size.
    """
    speaker_categs = torch.load(params.speaker_categs_path)
    num_speakers, speaker_feature_dim = speaker_categs.size()

    # Pretrained describer, frozen in eval mode.
    describer_model = util.load_model(params.header + DESCRIBER_FOOTER)
    describer = Describer(describer_model, speaker_feature_dim)
    describer.load_state_dict(torch.load(
        'snapshots/' + params.header + DESCRIBER_FOOTER + '.pth'))
    describer.eval()

    # Pretrained reconstructor.
    reconstructor_model = util.load_model(params.header + RECONSTRUCTOR_FOOTER)
    reconstructor = Reconstructor(reconstructor_model, params.log_frac)
    reconstructor.load_state_dict(torch.load(
        'snapshots/' + params.header + RECONSTRUCTOR_FOOTER + '.pth'))

    # Fresh latent forger to be pretrained.
    latent_forger_model = util.load_model(params.header + LATENT_FORGER_FOOTER)
    latent_forger = LatentForger(latent_forger_model)
    util.initialize(latent_forger)

    if example_tensor.is_cuda:
        speaker_categs = speaker_categs.cuda()
        describer = describer.cuda()
        reconstructor = reconstructor.cuda()
        latent_forger = latent_forger.cuda()

    # Only the forger is optimized.
    optim = torch.optim.Adam(latent_forger.parameters(), lr=params.lr)

    data_loader = VCTKLoader(
        params.data_path, example_tensor, features='log')
    data_iterator = iter(data_loader)

    # Per-batch running sums; `loss_sum_batch` accumulates tensors so one
    # backward pass is taken on the batch mean.
    latent_loss_sum_batch = 0.0
    reconst_loss_sum_batch = 0.0
    loss_sum_batch = 0.0
    num_in_batch = 0

    period = 0
    while period < params.num_periods:
        period += 1

        loss_sum = 0.0
        loss_count = 0

        print(util.COMMENT_HEADER, end='')

        for _ in range(params.period_size):
            orig, orig_speaker = next(data_iterator)
            orig_categ = speaker_categs[orig_speaker].unsqueeze(0)

            # Random "source" category the forger pretends to come from.
            forgery_categ = speaker_categs[
                np.random.randint(num_speakers)].unsqueeze(0)

            # Identity task: mapping forgery_categ -> orig_categ applied
            # to the original latent should reproduce that latent.
            orig_latent, metadata = describer.latent(orig)
            orig_latent = orig_latent.detach()
            pretend_latent = latent_forger.modify_latent(
                orig_latent, forgery_categ, orig_categ)
            pretend_reconst = reconstructor.reconst(pretend_latent, metadata)

            latent_loss = latent_forger.pretrain_loss(
                pretend_latent, orig_latent)
            reconst_loss = reconstructor.reconst_loss(pretend_reconst, orig)

            loss = latent_loss + reconst_loss

            latent_loss_sum_batch += latent_loss.item()
            reconst_loss_sum_batch += reconst_loss.item()
            loss_sum_batch = loss_sum_batch + loss
            num_in_batch += 1

            if num_in_batch >= params.batch_size:
                # One optimizer step on the batch-mean loss.
                mean_loss = loss_sum_batch / num_in_batch
                optim.zero_grad()
                mean_loss.backward()
                optim.step()

                # Progress line: (latent|reconst).
                print("(" + "|".join([
                    "%0.3f" % (latent_loss_sum_batch / num_in_batch),
                    "%0.3f" % (reconst_loss_sum_batch / num_in_batch)]) + ")",
                    end=' ', flush=True)

                latent_loss_sum_batch = 0.0
                reconst_loss_sum_batch = 0.0
                loss_sum_batch = 0.0
                num_in_batch = 0

            loss_sum += loss.item()
            loss_count += 1

        print('')
        loss_mean = loss_sum / loss_count

        metrics = [
            ('period', period),
            ('loss', round(loss_mean, 3))
        ]
        util.print_metrics(metrics)

        # Snapshot the forger after every period.
        torch.save(
            latent_forger.state_dict(),
            'snapshots/' + params.header + LATENT_FORGER_FOOTER + '.pth')