Example #1
    def test_ImportUFONoSideEffectLO(self):
        """Checks that there are no side effects of the import of the LO UFO sm"""       
        ufo_model = ufomodels.load_model(import_ufo.find_ufo_path('sm'),False)
        original_all_particles = copy.copy(ufo_model.all_particles)
        original_all_vertices = copy.copy(ufo_model.all_vertices)
        original_all_couplings = copy.copy(ufo_model.all_couplings)
        original_all_lorentz = copy.copy(ufo_model.all_lorentz)
        original_all_parameters = copy.copy(ufo_model.all_parameters)
        original_all_orders = copy.copy(ufo_model.all_orders)
        original_all_functions = copy.copy(ufo_model.all_functions)

        ufo2mg5_converter = import_ufo.UFOMG5Converter(ufo_model)
        model = ufo2mg5_converter.load_model()
        # It is important to run import_ufo.OrganizeModelExpression(ufo_model).main()
        # since this reverts some of the changes done in load_model().
        # There *are* side effects in between, namely the expressions of the
        # CTcouplings which contained CTparameters have been substituted with dictionaries.
        parameters, couplings = import_ufo.OrganizeModelExpression(ufo_model).main()        

        self.assertEqual(original_all_particles,ufo_model.all_particles)        
        self.assertEqual(original_all_vertices,ufo_model.all_vertices)
        self.assertEqual(original_all_couplings,ufo_model.all_couplings)
        self.assertEqual(original_all_lorentz,ufo_model.all_lorentz)
        self.assertEqual(original_all_parameters,ufo_model.all_parameters)
        self.assertEqual(original_all_orders,ufo_model.all_orders)
        self.assertEqual(original_all_functions,ufo_model.all_functions)
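The snapshot-and-compare pattern above generalizes to any import or call that must not mutate module-level state. A minimal sketch, assuming a hypothetical module mymodule with a global list REGISTRY and a process() call that should leave it untouched:

import copy
import unittest

import mymodule  # hypothetical module holding module-level state


class TestNoSideEffect(unittest.TestCase):
    def test_process_no_side_effect(self):
        """process() must not mutate the module-level REGISTRY."""
        snapshot = copy.copy(mymodule.REGISTRY)  # shallow copy, like the tests above
        mymodule.process()
        self.assertEqual(snapshot, mymodule.REGISTRY)

Note that copy.copy only snapshots the list itself; in-place mutation of the contained objects would go undetected, which copy.deepcopy would catch at a higher cost.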
Example #2
    def test_ImportUFONoSideEffectNLO(self):
        """Checks that there are no side effects of the import of the NLO UFO sm"""
        ufo_model = ufomodels.load_model(import_ufo.find_ufo_path('loop_sm'),False)
        original_all_particles = copy.copy(ufo_model.all_particles)
        original_all_vertices = copy.copy(ufo_model.all_vertices)
        original_all_couplings = copy.copy(ufo_model.all_couplings)
        original_all_lorentz = copy.copy(ufo_model.all_lorentz)
        original_all_parameters = copy.copy(ufo_model.all_parameters)
        original_all_orders = copy.copy(ufo_model.all_orders)
        original_all_functions = copy.copy(ufo_model.all_functions)
        original_all_CTvertices = copy.copy(ufo_model.all_CTvertices)
        original_all_CTparameters = copy.copy(ufo_model.all_CTparameters)


        ufo2mg5_converter = import_ufo.UFOMG5Converter(ufo_model)
        model = ufo2mg5_converter.load_model()
        parameters, couplings = import_ufo.OrganizeModelExpression(ufo_model).main()        

        self.assertEqual(original_all_particles,ufo_model.all_particles)
        self.assertEqual(original_all_vertices,ufo_model.all_vertices)
#        self.assertEqual(original_all_couplings,ufo_model.all_couplings)
        self.assertEqual(original_all_lorentz,ufo_model.all_lorentz)
        self.assertEqual(original_all_parameters,ufo_model.all_parameters)
        self.assertEqual(original_all_orders,ufo_model.all_orders)
        self.assertEqual(original_all_functions,ufo_model.all_functions)
        self.assertEqual(original_all_CTvertices,ufo_model.all_CTvertices)
        self.assertEqual(original_all_CTparameters,ufo_model.all_CTparameters)
Example #3
 def __init__(self, modelpath, addon='__1'):
     """load the model from a valid UFO directory (otherwise keep everything
     as empty."""
     
     self.modelpath = modelpath
     model = ufomodels.load_model(modelpath)
     
     # Check the validity of the model. Too old UFO (before UFO 1.0)
     if not hasattr(model, 'all_orders'):
         raise USRMODERROR, 'Base Model doesn\'t follow the UFO convention (no couplings_order information)\n' +\
                            'MG5 is able to load such a model but NOT to use it with the add model feature.'
     if isinstance(model.all_particles[0].mass, basestring):
         raise USRMODERROR, 'Base Model doesn\'t follow the UFO convention (Mass/Width of particles are string names, not objects)\n' +\
                            'MG5 is able to load such a model but NOT to use it with the add model feature.'
                              
     
     self.particles = model.all_particles
     if any(hasattr(p, 'loop_particles') for p in self.particles):
         raise USRMODERROR, 'Base Model doesn\'t follow the UFO convention'
     self.vertices = model.all_vertices
     self.couplings = model.all_couplings
     self.lorentz = model.all_lorentz
     self.parameters = model.all_parameters
     self.Parameter = self.parameters[0].__class__
     self.orders = model.all_orders
     
     self.functions = model.all_functions
     self.new_external = []
     # UFO optional file
     if hasattr(model, 'all_propagators'):
         self.propagators = model.all_propagators
     else:
         self.propagators = [] 
         
     # UFO NLO extension
     if hasattr(model, 'all_CTvertices'):
         self.CTvertices = model.all_CTvertices
     else:
         self.CTvertices = []
     
     # translation map used when writing the python file
     if 'self.expr = expression' in open(pjoin(self.modelpath, 'object_library.py')).read():
         self.translate = {'expr': 'expression'}
     else:
         self.translate = {}
     
     # translation map for the expressions of the UFO model
     self.old_new = {}
     self.addon = addon
     
     # particle id -> object
     self.particle_dict = {}
     for particle in self.particles:
         self.particle_dict[particle.pdg_code] = particle
         
     # paths to all models that should be used for the Fortran files.
     self.all_path = [self.modelpath]
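The hasattr/else branches for the optional UFO files above can be collapsed with getattr defaults. A sketch of the same idea (not the MG5 code itself):

class OptionalUFOAttributes(object):
    def __init__(self, model):
        # getattr with a default replaces the hasattr/else pattern above
        self.propagators = getattr(model, 'all_propagators', [])
        self.CTvertices = getattr(model, 'all_CTvertices', [])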
Example #4
def test_tautologies(train, dev, glove, paths=['aug0', 'aug005', 'aug015', 'aug05']):
    testsets = [dev, generate_tautologies(dev), generate_contradictions(dev), generate_neutral(dev)]
    names = ['dev', 'ent', 'contr', 'neu']
    for path in paths:
        print path
        model_path = misc.best_model_path('models/' + path)
        model = models.load_model(model_path)
        accs = [models.test_model(model, dataset, glove) for dataset in testsets]
        for name, dataset, acc in zip(names, testsets, accs):
            print name, acc, len(dataset)
Example #5
 def test_ImportUFOcheckgoldstone(self):
     """Check goldstone is correct in NLO UFO"""
     ufo_model = ufomodels.load_model(import_ufo.find_ufo_path('loop_qcd_qed_sm'),False)
     original_all_particles = copy.copy(ufo_model.all_particles)
     for part in original_all_particles:
         if part.name.lower() in ['g0','g+']:
             if hasattr(part,"GoldstoneBoson"):
                 self.assertEqual(part.GoldstoneBoson,True)
             elif hasattr(part,"goldstoneboson"):
                 self.assertEqual(part.goldstoneboson,True)
             else:
                 raise import_ufo.UFOImportError, "Goldstone %s has no goldstoneboson attribute in loop_qcd_qed_sm" % part.name
Example #6
def test_all_models(dev, test, glove, folder='models/'):
    files = os.listdir(folder)
    extless = set([file.split('.')[0] for file in files if os.path.isfile(file)]) - set([''])
    epoch_less = set([file.split('~')[0] for file in extless])
    for model_short in epoch_less:
        if model_short in extless:
            modelname = model_short
        else:
            same_exper = [m for m in extless if m.startswith(model_short)]
            epoch_max = max([int(file.split('~')[1]) for file in same_exper])
            modelname = model_short + '~' + str(epoch_max)

        print modelname
        model = models.load_model(folder + modelname)
        dev_acc = models.test_model(model, dev, glove)
        test_acc = models.test_model(model, test, glove)
        print "Dev:", '{0:.2f}'.format(dev_acc * 100), "Test_acc:", '{0:.2f}'.format(test_acc * 100)
        print
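The name~epoch bookkeeping above can be factored into a small helper. A minimal sketch under the same filename convention (a hypothetical latest_checkpoint helper, not part of the original project):

import os

def latest_checkpoint(folder, model_short):
    """Return the snapshot name with the highest epoch for a model,
    following the '<name>~<epoch>' convention used above."""
    stems = [f.split('.')[0] for f in os.listdir(folder)]
    epochs = [int(s.split('~')[1]) for s in stems
              if s.startswith(model_short + '~')]
    if not epochs:
        return model_short  # no epoch-suffixed snapshot, use the bare name
    return '%s~%d' % (model_short, max(epochs))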
Example #7
 def add_model(self, model=None, path=None, identify_particles=None):
     """add another model in the current one"""
     
     self.new_external = []
     if path:
         model = ufomodels.load_model(path) 
             
     if not model:
         raise USRMODERROR, 'Need a valid Model'
     else:
         path = model.__path__[0]
     # Check the validity of the model. Too old UFO (before UFO 1.0)
     if not hasattr(model, 'all_orders'):
         raise USRMODERROR, 'Add-on Model doesn\'t follow the UFO convention (no couplings_order information)\n' +\
                            'MG5 is able to load such a model but NOT to use it with the add model feature.'
     if isinstance(model.all_particles[0].mass, basestring):
         raise USRMODERROR, 'Add-on Model doesn\'t follow the UFO convention (Mass/Width of particles are string names, not objects)\n' +\
                            'MG5 is able to load such a model but NOT to use it with the add model feature.'
 
     for order in model.all_orders:
         if hasattr(order, 'perturbative_expansion') and order.perturbative_expansion:
             raise USRMODERROR, 'Add-on model cannot be a loop model.'
                           
     for order in model.all_orders:
         self.add_coupling_order(order)
     
     # Automatically add identification for the anti-particle if needed
     # + define identify_pid, which keeps track of the identified pdg_codes
     if identify_particles:
         identify_pid = {}
         for new, old in identify_particles.items():
             new_part = next((p for p in model.all_particles if p.name==new), None)
             old_part = next((p for p in self.particles if p.name==old), None)
             if new_part is None:
                 raise USRMODERROR, "particle %s not in added model" % new
             if old_part is None:
                 raise USRMODERROR, "particle %s not in original model" % old
             # only dereference pdg_code once both lookups have succeeded
             identify_pid[new_part.pdg_code] = old_part.pdg_code
             if new_part.antiname not in identify_particles:
                 new_anti = new_part.antiname
                 old_anti = old_part.antiname
                 misc.sprint(old, new, new_anti, old_anti, old_part.antiname)
                 if old_anti == old:
                     raise USRMODERROR, "failed identification (one particle is self-conjugate and not the other)"
                 logger.info("adding identification for anti-particle: %s=%s" % (new_anti, old_anti))
                 identify_particles[new_anti] = old_anti
     
     for parameter in model.all_parameters:
         self.add_parameter(parameter, identify_pid)
     for coupling in model.all_couplings:
         self.add_coupling(coupling)
     for lorentz in model.all_lorentz:
         self.add_lorentz(lorentz)
     for particle in model.all_particles:
         if particle.name in identify_particles:
             self.add_particle(particle, identify=identify_particles[particle.name])
         else:
             self.add_particle(particle)
     for vertex in model.all_vertices:
         self.add_interaction(vertex)
     
     self.all_path.append(path)
     
     
     return
Example #8
    def test_identify_particle(self):
        
        GC_1 = Coupling(name = 'GC_1',
                  value = '(ee*complex(0,1)*complexconjugate(CKM3x100))/(sw*cmath.sqrt(2))',
                  order = {'QED':1})        
        #self.base_model.add_coupling(GC_1)
        M5 = Parameter(name = 'M5',
               nature = 'external',
               type = 'real',
               value = 125,
               texname = '\\text{MH}',
               lhablock = 'MASS',
               lhacode = [ 105 ]) 
        W5 = Parameter(name = 'W5',
               nature = 'external',
               type = 'real',
               value = 125,
               texname = '\\text{MH}',
               lhablock = 'DECAY',
               lhacode = [ 105 ]) 
        #self.base_model.add_parameter(M5)
        #self.base_model.add_parameter(W5)
        
        L = Lorentz(name = 'FFS2',
               spins = [ 2, 2, 1 ],
               structure = 'Identity(2,1)')
        #self.base_model.add_lorentz(L)

        B = Particle(pdg_code = 105,
             name = 'B',
             antiname = 'B',
             spin = 1,
             color = 1,
             mass = M5,
             width = W5,
             texname = 'H',
             antitexname = 'H',
             charge = 0,
             GhostNumber = 0,
             LeptonNumber = 0,
             Y = 0) 
        #self.base_model.add_particle(B)
        
        V_2 = Vertex(name = 'V_2',
             particles = [ B, B, B, B ],
             color = [ '1' ],
             lorentz = [ L ],
             couplings = {(0,0): GC_1})
        self.mymodel.__path__ = '.'
        self.base_model.add_model(self.mymodel, identify_particles={'B':'H'})
        
        # check that the B object still has its name/pdg_code
        self.assertEqual(B.pdg_code, 105)
        self.assertEqual(B.name, 'B')
        # check that the original model still has the H particles
        model = ufomodels.load_model(self.sm_path)
        particles_name = [p.name for p in model.all_particles]
        self.assertTrue('H' in particles_name)
        self.assertFalse('B' in particles_name)
        # check the mass
        parameters_name = [p.name for p in model.all_parameters]
        self.assertTrue('MH' in parameters_name)
        self.assertFalse('M5' in parameters_name)        
Example #9
                    choices=['resnet50', 'resnet3d50'])
args = parser.parse_args()
#Load video
videoPath = './tmp'
imgPath = './tmp/frames'
os.makedirs(videoPath, exist_ok=True)
os.makedirs(imgPath, exist_ok=True)
start = 10
end = 30
video_hash = 'Y6m6DYJ7RW8'
yt = YouTube('https://youtube.com/embed/%s?start=%d&end=%d' %
             (video_hash, start, end))
video = yt.streams.all()[0]
name = video.download('/tmp')
# Load model
model = models.load_model(args.arch)

av_categories = pd.read_csv('CVS_Actions.csv', delimiter=';').values.tolist()
trax = pd.read_csv('audioTracks_final.csv')

# Get dataset categories
categories = models.load_categories()

# Load the video frame transform
transform = models.load_transform()

# Obtain video frames
if args.frame_folder is not None:
    print('Loading frames in {}'.format(args.frame_folder))
    import glob
    # here make sure after sorting the frame paths have the correct temporal order
Example #10
    print('')
    print('Describe movie with base FE.')

    # Load model
    print('Loading model from checkpoint {}'.format(
        config.model.checkpoint_path))
    checkpoint = torch.load(config.model.checkpoint_path)
    embedding_size = checkpoint['embedding_size']

    # CUDA for PyTorch
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")

    model = models.load_model(config.model.model_arch,
                              device,
                              embedding_size=embedding_size)
    model.load_state_dict(checkpoint['model_state_dict'])

    filename = os.path.join(config.dataset.movie.dataset_path, 'bbx.txt')
    bbx_list = utils.read_file_to_list(filename)

    plotter = utils.VisdomPlotter(config.visdom.server,
                                  env_name='video_annotation',
                                  port=config.visdom.port)

    vd_utils.annotate_video(
        config.dataset.movie.movie_path,
        config.output.video_dir,
        model,
        device,
Example #11
train_csv = os.path.join(DATA_PATH, 'train.csv')
img_path = os.path.join(DATA_PATH, 'train_images')
kfold_path = 'kfold.pkl'
data_path = '/home/jianglb/pythonproject/cloud_segment/data/train_{}_{}'.format(
    *RESIZE)
# data_path = '/home/noel/pythonproject/cloud_segment/data/train_{}_{}'.format(*RESIZE)
test_data = os.path.join(DATA_PATH, 'test_{}_{}'.format(*RESIZE))

cls_probs = []
cloud_class = 0

with torch.no_grad():
    for fold in range(K):
        print('Fold{}:'.format(fold))
        cls_model = load_model(model_name,
                               classes=4,
                               dropout=0.,
                               pretrained=False)
        cls_model.load_state_dict(
            torch.load(os.path.join(save_dir, 'model_{}.pth'.format(fold))))
        cls_model.cuda()
        cls_model.eval()

        preprocessing_fn = smp.encoders.get_preprocessing_fn(
            'resnet34', 'imagenet')

        # validate
        cls_probs_fold = 0
        for tt in range(4):
            validate_dataset = CloudTrainDataset2(
                train_csv,
                data_path,
Example #12
    # data_name = os.path.basename(dir_imgs)
    data_name = "fsa_MSE_CMU_fusion"
    save_path = os.path.join(save_dir, data_name)
    
    save_imgs_path = os.path.join(save_path, "processed_imgs")
    save_cropped_path = os.path.join(save_path, "cropped_imgs")
    save_video_path = os.path.join(save_path, "processed_videos")
    Path(save_dir).mkdir(parents=True, exist_ok=True)
    Path(save_imgs_path).mkdir(parents=True, exist_ok=True)
    Path(save_cropped_path).mkdir(parents=True, exist_ok=True)
    Path(save_video_path).mkdir(parents=True, exist_ok=True)
    # Path(save_imgs_AFLW2000_path).mkdir(parents=True, exist_ok=True)

    config = yaml.load(open(var_config_path), Loader=yaml.FullLoader)
    net_config = config['Net']
    model = load_model(**net_config)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"[INFO] Device: {device}")
    # To device
    model = model.to(device)

    if torch.cuda.is_available():
        model = nn.DataParallel(model)

    uni_model, target_size, num_workers, batch_size, n_class = create_model(uni_config_path)
    var_model, target_size, num_workers, batch_size, n_class = create_model(var_config_path)
    wei_model, target_size, num_workers, batch_size, n_class = create_model(wei_config_path)

    uni_model = load_pretrain_model(uni_model, uni_model_path)
    var_model = load_pretrain_model(var_model, var_model_path)
Example #13
import sys
import torch
import json
import pickle
import gzip
from suggest_utils import calc_reviewer_db_mapping, print_text_report, print_progress
from suggest_reviewers import create_embeddings, calc_similarity_matrix
from models import load_model

accepted_submissions = pickle.load(open("../data/pkl/cached_or.pkl", "br"))

# Collect the abstracts
abstracts = []
abstract_keys = list(accepted_submissions.keys())
for k, v in accepted_submissions.items():
    abstracts.append(v.content["abstract"])
conf_abs = abstracts

print('Loading model', file=sys.stderr)
model, epoch = load_model(None, "scratch/similarity-model.pt")
model.eval()
assert not model.training

# Get recommendations within the conference
paper_embs = create_embeddings(model, conf_abs)

pickle.dump(paper_embs, open("paper_embeddings.pkl", "bw"))
Example #14
            new_db.append(paper)
    db = new_db
    db_abs = [x['paperAbstract'] for x in db]
    rdb = calc_reviewer_db_mapping(reviewer_data,
                                   db,
                                   author_col=args.filter_field,
                                   author_field='authors')

    # Calculate or load paper similarity matrix
    if args.load_paper_matrix:
        mat = np.load(args.load_paper_matrix)
        assert (mat.shape[0] == len(submission_abs)
                and mat.shape[1] == len(db_abs))
    else:
        print('Loading model', file=sys.stderr)
        model, epoch = load_model(None, args.model_file, force_cpu=True)
        model.eval()
        assert not model.training
        mat = calc_similarity_matrix(model, db_abs, submission_abs)
        if args.save_paper_matrix:
            np.save(args.save_paper_matrix, mat)

    # Calculate reviewer scores based on paper similarity scores
    if args.load_aggregate_matrix:
        reviewer_scores = np.load(args.load_aggregate_matrix)
        assert (reviewer_scores.shape[0] == len(submission_abs)
                and reviewer_scores.shape[1] == len(reviewer_names))
    else:
        print('Calculating aggregate reviewer scores', file=sys.stderr)
        reviewer_scores = calc_aggregate_reviewer_score(
            rdb, mat, args.aggregator)
Example #15
File: eval.py  Project: wyb330/nmt
def main(args, max_data_size=0, shuffle=True, display=False):
    hparams.set_hparam('batch_size', 10)
    hparams.add_hparam('is_training', False)
    check_vocab(args)
    datasets, src_data_size = load_dataset(args)
    iterator = iterator_utils.get_eval_iterator(hparams, datasets, hparams.eos, shuffle=shuffle)
    src_vocab, tgt_vocab, src_dataset, tgt_dataset, tgt_reverse_vocab, src_vocab_size, tgt_vocab_size = datasets
    hparams.add_hparam('vocab_size_source', src_vocab_size)
    hparams.add_hparam('vocab_size_target', tgt_vocab_size)

    sess, model = load_model(hparams, tf.contrib.learn.ModeKeys.EVAL, iterator, src_vocab, tgt_vocab, tgt_reverse_vocab)

    if args.restore_step:
        checkpoint_path = os.path.join(args.model_path, 'nmt.ckpt')
        ckpt = '%s-%d' % (checkpoint_path, args.restore_step)
    else:
        ckpt = tf.train.latest_checkpoint(args.model_path)
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
    if ckpt:
        saver.restore(sess, ckpt)
    else:
        raise Exception("can not found checkpoint file")

    src_vocab_file = os.path.join(args.model_path, 'vocab.src')
    src_reverse_vocab = build_reverse_vocab_table(src_vocab_file, hparams)
    sess.run(tf.tables_initializer())

    step_count = 1
    with sess:
        logger.info("starting evaluating...")
        sess.run(iterator.initializer)
        eos = hparams.eos.encode()
        references = []
        translations = []
        start_time = time.time()
        while True:
            try:
                if (max_data_size > 0) and (step_count * hparams.batch_size > max_data_size):
                    break
                if step_count % 10 == 0:
                    t = time.time() - start_time
                    logger.info('step={0} total={1} time={2:.3f}'.format(step_count, step_count * hparams.batch_size, t))
                    start_time = time.time()
                predictions, source, target, source_text, confidence = model.eval(sess)
                reference = bpe2sent(target, eos)
                if hparams.beam_width == 1:
                    translation = bytes2sent(list(predictions), eos)
                else:
                    translation = bytes2sent(list(predictions[:, 0]), eos)

                for s, r, t in zip(source, reference, translation):
                    if display:
                        source_sent = src_reverse_vocab.lookup(tf.constant(list(s), tf.int64))
                        source_sent = sess.run(source_sent)
                        source_sent = text_utils.format_bpe_text(source_sent, eos)
                        print('{}\n{}\n{}\n'.format(source_sent, r, t))
                    references.append(r)
                    translations.append(t)

                if step_count % 100 == 0:
                    bleu_score = moses_multi_bleu(references, translations, args.model_path)
                    logger.info('bleu score = {0:.3f}'.format(bleu_score))

                step_count += 1
            except tf.errors.OutOfRangeError:
                logger.info('Done eval data')
                break

        logger.info('compute bleu score...')
        # bleu_score = compute_bleu_score(references, translations)
        bleu_score = moses_multi_bleu(references, translations, args.model_path)
        logger.info('bleu score = {0:.3f}'.format(bleu_score))
Example #16
if __name__ == '__main__':
    from models import load_model
    from torchvision import transforms
    from torchvision.datasets import MNIST, CIFAR10
    from imagenet import ImageNet
    from torch.utils.data import DataLoader
    from dataloader import *
    def give_perturbation(x):
        return x

    use_cuda = True
    dataset = load_dataset('cifar10')
    dataloader = load_dataloader(dataset)
    preprocess = get_preprocess(
        size=32, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    model = load_model('vgg16', 'cifar10', use_cuda=use_cuda)
    model.eval()

    # img = np.zeros((32,32,3)).astype('float32') ## 32,32,3 ndarray
    img = np.ones((32, 32, 3)).astype('float32')  # 32,32,3 ndarray
    img = img/2

    """
    img = give_perturbation(img)
    img = preprocess(img)
    img = torch.Tensor(img).unsqueeze(0)
    img = img.cuda()
    with torch.no_grad():
        logits = model(img)
        prediction = F.softmax(logits,dim = -1)
        prediction = prediction.cpu().detach().numpy()
Example #17
        n += len(sentences)
        if n % 10000 == 0:
            print('\r - Encoder: {:d} sentences'.format(n), end='')
    print('\r - Encoder: {:d} sentences'.format(n), end='')
    t = int(time.time() - t)
    if t < 1000:
        print(' in {:d}s'.format(t))
    else:
        print(' in {:d}m{:d}s'.format(t // 60, t % 60))
    fin.close()
    fout.close()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    parser.add_argument("--load-file", help="path to saved model")
    parser.add_argument("--sp-model", help="sentencepiece model to use")
    parser.add_argument("--gpu",
                        default=1,
                        type=int,
                        help="whether to train on gpu")
    parser.add_argument("--sentence-file", help="sentence file")
    parser.add_argument("--output-file", help="prefix for output numpy file")
    args = parser.parse_args()

    model, _ = load_model(None, args)
    print(model.args)
    model.eval()
    embed_all(args, model)
Example #18
def main(args, max_data_size=0):
    vocab_dir = args.vocab_dir
    log_file_handler = logging.FileHandler(os.path.join(vocab_dir, 'train.log'))
    logger.addHandler(log_file_handler)

    check_vocab(args, vocab_dir)
    datasets = load_dataset(args, vocab_dir)
    iterator = iterator_utils.get_iterator(hparams, datasets, max_rows=max_data_size)
    src_vocab, tgt_vocab, _, _, src_vocab_size, tgt_vocab_size = datasets
    hparams.add_hparam('is_training', True)
    hparams.add_hparam('vocab_size_source', src_vocab_size)
    hparams.add_hparam('vocab_size_target', tgt_vocab_size)
    pprint(hparams.values())
    sess, model = load_model(hparams, tf.contrib.learn.ModeKeys.TRAIN, iterator, src_vocab, tgt_vocab)

    if args.restore_step > 0:
        checkpoint_path = os.path.join(vocab_dir, 'nmt.ckpt')
        ckpt = '%s-%d' % (checkpoint_path, args.restore_step)
    else:
        ckpt = tf.train.latest_checkpoint(vocab_dir)
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
    if ckpt:
        saver.restore(sess, ckpt)
    else:
        sess.run(tf.global_variables_initializer())
        print("Created model with fresh parameters.")

    sess.run(tf.tables_initializer())
    with sess:
        writer = tf.summary.FileWriter(vocab_dir, sess.graph)
        logger.info("starting training...")
        epochs = 1
        step_in_epoch = 0
        learning_rate = hparams.learning_rate
        checkpoint_path = os.path.join(vocab_dir, "nmt.ckpt")

        sess.run(iterator.initializer)
        while epochs <= args.num_train_epochs:
            start_time = time.time()
            try:
                loss, global_step, learning_rate, accuracy, summary = model.step(sess)
                step_in_epoch += 1
                if global_step % args.summary_per_steps == 0:
                    write_summary(writer, summary, global_step)

            except tf.errors.OutOfRangeError:
                logger.info('{} epochs finished'.format(epochs))
                # saver.save(sess, checkpoint_path, global_step=global_step)
                epochs += 1
                step_in_epoch = 1
                sess.run(iterator.initializer)
                continue

            sec_per_step = time.time() - start_time
            logger.info("Epoch %-3d Step %-d - %-d [%.3f sec, loss=%.4f, acc=%.3f, lr=%f]" %
                        (epochs, global_step, step_in_epoch, sec_per_step, loss, accuracy, learning_rate))

            if global_step % args.steps_per_checkpoint == 0:
                model_checkpoint_path = saver.save(sess, checkpoint_path, global_step=global_step)
                logger.info("Saved checkpoint to {}".format(model_checkpoint_path))

            if math.isnan(loss) or math.isinf(loss):
                raise Exception('loss overflow')

        writer.close()
Example #19
X_full_train, X_sketch_train, X_full_val, X_sketch_val = load_data(
    data_folder, dset, image_data_format)
img_dim = X_full_train.shape[-3:]

# Get the number of non overlapping patch and the size of input image to the discriminator
nb_patch, img_dim_disc = get_nb_patch(img_dim, patch_size, image_data_format)

try:

    # Create optimizers
    opt_dcgan = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    opt_discriminator = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    # Load generator model
    generator_model = load_model("generator_unet_%s" % generator, img_dim,
                                 nb_patch, bn_mode, use_mbd, batch_size,
                                 do_plot)
    # Load discriminator model
    discriminator_model = load_model("DCGAN_discriminator", img_dim_disc,
                                     nb_patch, bn_mode, use_mbd, batch_size,
                                     do_plot)

    # Compile generator model
    generator_model.compile(loss='mae', optimizer=opt_discriminator)
    discriminator_model.trainable = False

    # Define DCGAN model
    DCGAN_model = DCGAN(generator_model, discriminator_model, img_dim,
                        patch_size, image_data_format)

    # Define loss function and loss weights
Example #20
"""Train a model"""

import sys
from helpers import *
from models import load_model

if len(sys.argv) < 4:
    print "USAGE: python train.py NAME MODEL_TYPE DATA_DIR (CHECKPOINT_HDF5_FILE)"
    sys.exit(0)

# Inputs
name = sys.argv[1]
type = sys.argv[2]
data_dir = sys.argv[3]
checkpoint = sys.argv[4] if len(sys.argv) > 4 else None

# Load model
model = load_model(name, type, tmp_dir, checkpoint)

# Get class weights
female_train, female_test, male_train, male_test = get_data(data_dir)
class_weight = {0: float(len(male_train)) / float(len(female_train)), 1: 1.}

# Train model
img_dir, meta_dir, train_dir, test_dir = get_directories(data_dir)
model.train(train_dir, test_dir, class_weight=class_weight)
model.save(os.path.join(results_dir, name + '.h5'))
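The class_weight line above balances the loss by inverse class frequency. The same idea as a standalone sketch (hypothetical helper, made-up counts):

def inverse_frequency_weights(counts):
    """Map class -> loss weight so rarer classes weigh more,
    normalized so the most frequent class has weight 1.0."""
    biggest = max(counts.values())
    return {cls: float(biggest) / n for cls, n in counts.items()}

print inverse_frequency_weights({0: 200, 1: 800})  # {0: 4.0, 1: 1.0}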
Example #21
# -*- coding: utf-8 -*-

from models import load_model
import my_utils
from torch.utils.data import DataLoader
import torch

args = my_utils.get_args()
data_path,noc = my_utils.get_data_path(args.dataset,'./config.txt')
model = load_model(args.model,noc).cuda()
train_images = data_path+'/train/images'
train_labels = data_path+'/train/labels'
val_images = data_path+'/validation/images'
val_labels = data_path+'/validation/labels'
test_images = data_path+'/test/images'
test_labels = data_path+'/test/labels'
# DATA LOADERS
train_loader = DataLoader(my_utils.getDataset(train_images,
                                              train_labels,
                                              size = (360,480)),
                          batch_size=args.batch_size,
                          num_workers=args.num_of_workers,
                          shuffle=True)

val_loader = DataLoader(my_utils.getDataset(val_images,
                                            val_labels,
                                            size = (360,480)),
                        batch_size=args.batch_size,
                        num_workers=args.num_of_workers,
                        shuffle=False)
test_loader = DataLoader(my_utils.getDataset(test_images,
Example #22
def load_video(video_hash):
    yt = YouTube('https://youtube.com/embed/%s?start=%d&end=%d' %
                 (video_hash, start, end))
    video = yt.streams.all()[0]
    name = video.download('/tmp')
    #   Load model
    model = models.load_model(arch)

    av_categories = pd.read_csv('CVS_Actions(NEW).csv',
                                delimiter=';').values.tolist()
    trax = pd.read_csv('audioTracks_urls.csv')

    # Get dataset categories
    #categories = models.load_categories()

    # Load the video frame transform
    transform = models.load_transform()

    # Obtain video frames
    if frame_folder is not None:
        print('Loading frames in {}'.format(frame_folder))
        import glob
        # here make sure after sorting the frame paths have the correct temporal order
        frame_paths = sorted(glob.glob(os.path.join(frame_folder, '*.jpg')))
        print(frame_paths)
        frames = load_frames(frame_paths)
    else:
        print('Extracting frames using ffmpeg...')
        frames = extract_frames(name, num_segments)

    # Prepare input tensor
    if arch == 'resnet3d50':
        # [1, num_frames, 3, 224, 224]
        input = torch.stack([transform(frame) for frame in frames],
                            1).unsqueeze(0)
    else:
        # [num_frames, 3, 224, 224]
        input = torch.stack([transform(frame) for frame in frames])

    # Make video prediction
    with torch.no_grad():
        logits = model(input)
        h_x = F.softmax(logits, 1).mean(dim=0)
        probs, idx = h_x.sort(0, True)

    # Output the prediction.

    print('RESULT ON ' + name)
    y = float(av_categories[idx[0]][1]) * 125
    x = float(av_categories[idx[0]][2]) * 125

    trax = trax.assign(
        dist=lambda row: np.sqrt((x - row.valence)**2 + (y - row.energy)**2))
    print('min', trax['dist'].min())

    best = trax.nsmallest(100, 'dist')
    print(best)

    rand = randint(0, 9)
    print(rand)
    choice = best.iloc[rand, [1, 2, 5]]

    print('choice', choice)

    song = 'valence: ' + str(x) + ' arousal: ' + str(
        y) + " " + choice[0] + ' ' + choice[1]
    print(song)
    print(x, y)
    for i in range(0, 5):
        print('{:.3f} -> {} ->{}'.format(probs[i], idx[i],
                                         av_categories[idx[i]]))
        print('result   categories', av_categories[idx[i]][0],
              av_categories[idx[i]][1])

    #r = requests.get(match.iloc[0,2], allow_redirects=True)
    r = requests.get(choice[2], allow_redirects=True)
    open('./tmp/preview.mp3', 'wb').write(r.content)
    # Render output frames with prediction text.
    rendered_output = './tmp/' + video_hash + '_' + str(x) + '_' + str(
        y) + '.mp4'
    if rendered_output is not None:
        clip = VideoFileClip(name).subclip(30, 60)
        audioclip = AudioFileClip('./tmp/preview.mp3')
        txt_clip = TextClip(song, fontsize=16, color='white')
        clip_final = clip.set_audio(audioclip)
        video = CompositeVideoClip([clip_final, txt_clip])
        video.set_duration(30).write_videofile(rendered_output)
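The song lookup above is a nearest-neighbour query in valence/energy space. A compact sketch of the same pandas idiom with a hypothetical DataFrame:

import numpy as np
import pandas as pd

trax = pd.DataFrame({'title': ['a', 'b', 'c'],
                     'valence': [10.0, 40.0, 90.0],
                     'energy': [20.0, 80.0, 50.0]})
x, y = 35.0, 75.0  # predicted valence/arousal point

# Euclidean distance from every track to the prediction, then the closest few
trax = trax.assign(dist=np.sqrt((x - trax.valence) ** 2 + (y - trax.energy) ** 2))
print(trax.nsmallest(2, 'dist'))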
Example #23
    #     skip_difficult=True,
    #     **common_args
    # )
    generator = PascalVocGenerator(
        'datasets/VOC2007',
        'test',
        shuffle_groups=False,
        skip_truncated=False,
        skip_difficult=True,
        **common_args
    )
    model_path = 'snapshots/2019-08-25/resnet101_pascal_07_0.7352.h5'
    # load retinanet model
    import keras.backend as K
    K.clear_session()
    K.set_learning_phase(1)
    model = models.load_model(model_path, backbone_name='resnet101')
    # if the model is not converted to an inference model, use the line below
    # see: https://github.com/fizyr/keras-retinanet#converting-a-training-model-to-inference-model
    model = models.convert_model(model)
    average_precisions = evaluate(generator, model, epoch=0)
    # compute per class average precision
    total_instances = []
    precisions = []
    for label, (average_precision, num_annotations) in average_precisions.items():
        print('{:.0f} instances of class'.format(num_annotations), generator.label_to_name(label), 'with average precision: {:.4f}'.format(average_precision))
        total_instances.append(num_annotations)
        precisions.append(average_precision)
    mean_ap = sum(precisions) / sum(x > 0 for x in total_instances)
    print('mAP: {:.4f}'.format(mean_ap))
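Note the denominator in the mAP above: sum(x > 0 for x in total_instances) counts only classes that actually occur in the test set, so absent classes do not drag the mean down. The same reduction in isolation, with made-up numbers:

# per-class (average_precision, num_annotations) pairs, as returned by evaluate()
average_precisions = {0: (0.91, 120), 1: (0.64, 35), 2: (0.0, 0)}  # class 2 absent

precisions = [ap for ap, n in average_precisions.values()]
total_instances = [n for ap, n in average_precisions.values()]

# average only over classes with at least one ground-truth instance
mean_ap = sum(precisions) / sum(n > 0 for n in total_instances)
print('mAP: {:.4f}'.format(mean_ap))  # (0.91 + 0.64 + 0.0) / 2 = 0.7750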
Example #24
def export(args):
    model = models.load_model(args.model_dir)
    model.export_inference_model(args.save_dir)
Example #25
def main() -> None:

    parser = argparse.ArgumentParser(description="Flower")
    parser.add_argument(
        "--server_address",
        type=str,
        default=DEFAULT_SERVER_ADDRESS,
        help=f"gRPC server address (default: {DEFAULT_SERVER_ADDRESS})",
    )
    parser.add_argument(
        "--rounds",
        type=int,
        default=1,
        help="Number of rounds of federated learning (default: 1)",
    )
    parser.add_argument(
        "--sample_fraction",
        type=float,
        default=1.0,
        help=
        "Fraction of available clients used for fit/evaluate (default: 1.0)",
    )
    parser.add_argument(
        "--min_sample_size",
        type=int,
        default=2,
        help="Minimum number of clients used for fit/evaluate (default: 2)",
    )
    parser.add_argument(
        "--min_num_clients",
        type=int,
        default=2,
        help=
        "Minimum number of available clients required for sampling (default: 2)",
    )
    parser.add_argument(
        "--model",
        type=str,
        default="simple-cnn",
        help="Model to use for training (default: simple-cnn)",
    )
    parser.add_argument(
        "--dataset",
        type=str,
        default="cifar-10",
        help="Dataset to use fro training (default: cifar-10)",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="CPU",
        help="Device to run the model on (default: CPU)",
    )
    parser.add_argument(
        "--strategy",
        type=str,
        default="FedAvg",
        help="Aggregation strategy (default: FedAvg)",
    )
    parser.add_argument(
        "--epochs",
        type=int,
        default=1,
        help=
        "Number of local epochs to run on each client before aggregation (default: 1)",
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=32,
        help="Batch size to be used by each worker (default: 32)",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=0.001,
        help="Learning rate to be used by each worker (default: 0.001)",
    )
    parser.add_argument(
        "--quantize",
        # argparse's type=bool treats any non-empty string as True,
        # so expose this as an on/off flag instead
        action="store_true",
        help="Use quantization (default: False)",
    )
    parser.add_argument(
        "--quantize_bits",
        type=int,
        default=64,
        help="Quantization bits (default: 64)",
    )
    parser.add_argument(
        "--log_host",
        type=str,
        help="Logserver address (no default)",
    )
    args = parser.parse_args()

    # check for runnable device
    DEVICE = torch.device("cuda:0" if args.device == "GPU" else "cpu")

    # Configure logger
    fl.common.logger.configure("server", host=args.log_host)

    # Load evaluation data
    _, testset = datasets.load_data(dataset_name=args.dataset, framework="PT")

    # Create client_manager, strategy, and server
    client_manager = fl.server.SimpleClientManager()
    aggregation_strategy = get_strategy(
        strategy_name=args.strategy,
        fraction_fit=args.sample_fraction,
        min_fit_clients=args.min_sample_size,
        min_available_clients=args.min_num_clients,
        eval_fn=get_eval_fn(model=args.model, testset=testset, device=DEVICE),
        on_fit_config_fn=get_fit_config_fn(args),
        dummy_model=models.load_model(model_name=args.model, framework="PT"),
        quantize=args.quantize,
        quantize_bits=args.quantize_bits,
    )

    server = fl.server.Server(client_manager=client_manager,
                              strategy=aggregation_strategy)

    # Run server
    fl.server.start_server(
        args.server_address,
        server,
        config={"num_rounds": args.rounds},
    )
Example #26
def infer(args):
    resize_h = args.image_shape[1]
    resize_w = args.image_shape[0]

    test_transforms = transforms.Compose(
        [transforms.Resize((resize_w, resize_h)),
         transforms.Normalize()])
    model = models.load_model(args.model_dir)

    if not osp.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # Image background replacement
    if args.image_path is not None:
        if not osp.exists(args.image_path):
            raise Exception('The --image_path is not existed: {}'.format(
                args.image_path))
        if args.background_image_path is None:
            raise Exception(
                'The --background_image_path is not set. Please set it')
        else:
            if not osp.exists(args.background_image_path):
                raise Exception(
                    'The --background_image_path is not existed: {}'.format(
                        args.background_image_path))
        img = cv2.imread(args.image_path)
        score_map, im_info = predict(img, model, test_transforms)
        score_map = score_map[:, :, 1]
        score_map = recover(score_map, im_info)
        bg = cv2.imread(args.background_image_path)
        save_name = osp.basename(args.image_path)
        save_path = osp.join(args.save_dir, save_name)
        result = bg_replace(score_map, img, bg)
        cv2.imwrite(save_path, result)

    # Video background replacement: use the background video if provided, otherwise use the provided background image
    else:
        is_video_bg = False
        if args.background_video_path is not None:
            if not osp.exists(args.background_video_path):
                raise Exception(
                    'The --background_video_path is not existed: {}'.format(
                        args.background_video_path))
            is_video_bg = True
        elif args.background_image_path is not None:
            if not osp.exists(args.background_image_path):
                raise Exception(
                    'The --background_image_path is not existed: {}'.format(
                        args.background_image_path))
        else:
            raise Exception(
                'Please offer a background image or video. You should set --background_image_path or --background_video_path'
            )

        disflow = cv2.DISOpticalFlow_create(
            cv2.DISOPTICAL_FLOW_PRESET_ULTRAFAST)
        prev_gray = np.zeros((resize_h, resize_w), np.uint8)
        prev_cfd = np.zeros((resize_h, resize_w), np.float32)
        is_init = True
        if args.video_path is not None:
            print('Please wait. It is computing......')
            if not osp.exists(args.video_path):
                raise Exception('The --video_path is not existed: {}'.format(
                    args.video_path))

            cap_video = cv2.VideoCapture(args.video_path)
            fps = cap_video.get(cv2.CAP_PROP_FPS)
            width = int(cap_video.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
            save_name = osp.basename(args.video_path)
            save_name = save_name.split('.')[0]
            save_path = osp.join(args.save_dir, save_name + '.avi')

            cap_out = cv2.VideoWriter(
                save_path, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps,
                (width, height))

            if is_video_bg:
                cap_bg = cv2.VideoCapture(args.background_video_path)
                frames_bg = cap_bg.get(cv2.CAP_PROP_FRAME_COUNT)
                current_frame_bg = 1
            else:
                img_bg = cv2.imread(args.background_image_path)
            while cap_video.isOpened():
                ret, frame = cap_video.read()
                if ret:
                    score_map, im_info = predict(frame, model, test_transforms)
                    cur_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    cur_gray = cv2.resize(cur_gray, (resize_w, resize_h))
                    score_map = 255 * score_map[:, :, 1]
                    optflow_map = postprocess(cur_gray, score_map, prev_gray, prev_cfd, \
                                              disflow, is_init)
                    prev_gray = cur_gray.copy()
                    prev_cfd = optflow_map.copy()
                    is_init = False
                    optflow_map = cv2.GaussianBlur(optflow_map, (3, 3), 0)
                    optflow_map = threshold_mask(optflow_map,
                                                 thresh_bg=0.2,
                                                 thresh_fg=0.8)
                    score_map = recover(optflow_map, im_info)

                    # read background frames in a loop
                    if is_video_bg:
                        ret_bg, frame_bg = cap_bg.read()
                        if ret_bg:
                            if current_frame_bg == frames_bg:
                                current_frame_bg = 1
                                cap_bg.set(cv2.CAP_PROP_POS_FRAMES, 0)
                        else:
                            break
                        current_frame_bg += 1
                        comb = bg_replace(score_map, frame, frame_bg)
                    else:
                        comb = bg_replace(score_map, frame, img_bg)

                    cap_out.write(comb)
                else:
                    break

            if is_video_bg:
                cap_bg.release()
            cap_video.release()
            cap_out.release()

        # When no input image or video is given, open the camera instead
        else:
            cap_video = cv2.VideoCapture(0)
            if not cap_video.isOpened():
                raise IOError("Error opening video stream or file; check "
                              "whether --video_path exists: {} "
                              "or whether the camera is working".format(
                                  args.video_path))

            if is_video_bg:
                cap_bg = cv2.VideoCapture(args.background_video_path)
                frames_bg = cap_bg.get(cv2.CAP_PROP_FRAME_COUNT)
                current_frame_bg = 1
            else:
                img_bg = cv2.imread(args.background_image_path)
            while cap_video.isOpened():
                ret, frame = cap_video.read()
                if ret:
                    score_map, im_info = predict(frame, model, test_transforms)
                    cur_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    cur_gray = cv2.resize(cur_gray, (resize_w, resize_h))
                    score_map = 255 * score_map[:, :, 1]
                    optflow_map = postprocess(cur_gray, score_map, prev_gray, prev_cfd, \
                                              disflow, is_init)
                    prev_gray = cur_gray.copy()
                    prev_cfd = optflow_map.copy()
                    is_init = False
                    optflow_map = cv2.GaussianBlur(optflow_map, (3, 3), 0)
                    optflow_map = threshold_mask(optflow_map,
                                                 thresh_bg=0.2,
                                                 thresh_fg=0.8)
                    score_map = recover(optflow_map, im_info)

                    # read background frames in a loop
                    if is_video_bg:
                        ret_bg, frame_bg = cap_bg.read()
                        if ret_bg:
                            if current_frame_bg == frames_bg:
                                current_frame_bg = 1
                                cap_bg.set(cv2.CAP_PROP_POS_FRAMES, 0)
                        else:
                            break
                        current_frame_bg += 1
                        comb = bg_replace(score_map, frame, frame_bg)
                    else:
                        comb = bg_replace(score_map, frame, img_bg)
                    cv2.imshow('HumanSegmentation', comb)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
                else:
                    break
            if is_video_bg:
                cap_bg.release()
            cap_video.release()
Example #27
def test(args):
    with open(args.config, "r") as fid:
        config = json.load(fid)

    if not args.disable_cuda:
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    dataset = config["data"]["dataset"]
    if not os.path.exists(f"datasets/{dataset}.py"):
        raise ValueError(f"Unknown dataset {dataset}")
    dataset = utils.module_from_file("dataset", f"datasets/{dataset}.py")

    input_size = config["data"]["num_features"]
    data_path = config["data"]["data_path"]
    preprocessor = dataset.Preprocessor(
        data_path,
        num_features=input_size,
        tokens_path=config["data"].get("tokens", None),
        lexicon_path=config["data"].get("lexicon", None),
        use_words=config["data"].get("use_words", False),
        prepend_wordsep=config["data"].get("prepend_wordsep", False),
    )
    data = dataset.Dataset(data_path, preprocessor, split=args.split)
    loader = utils.data_loader(data, config)

    criterion, output_size = models.load_criterion(
        config.get("criterion_type", "ctc"),
        preprocessor,
        config.get("criterion", {}),
    )
    criterion = criterion.to(device)
    model = models.load_model(config["model_type"], input_size, output_size,
                              config["model"]).to(device)
    models.load_from_checkpoint(model, criterion, args.checkpoint_path,
                                args.load_last)

    model.eval()
    meters = utils.Meters()
    for inputs, targets in loader:
        outputs = model(inputs.to(device))
        meters.loss += criterion(outputs, targets).item() * len(targets)
        meters.num_samples += len(targets)
        predictions = criterion.viterbi(outputs)
        for p, t in zip(predictions, targets):
            p, t = preprocessor.tokens_to_text(p), preprocessor.to_text(t)
            pw, tw = p.split(preprocessor.wordsep), t.split(
                preprocessor.wordsep)
            pw, tw = list(filter(None, pw)), list(filter(None, tw))
            tokens_dist = editdistance.eval(p, t)
            words_dist = editdistance.eval(pw, tw)
            print("CER: {:.3f}".format(tokens_dist * 100.0 /
                                       len(t) if len(t) > 0 else 0))
            print("WER: {:.3f}".format(words_dist * 100.0 /
                                       len(tw) if len(tw) > 0 else 0))
            print("HYP:", "".join(p))
            print("REF", "".join(t))
            print("=" * 80)
            meters.edit_distance_tokens += tokens_dist
            meters.edit_distance_words += words_dist
            meters.num_tokens += len(t)
            meters.num_words += len(tw)

    print("Loss {:.3f}, CER {:.3f}, WER {:.3f}, ".format(
        meters.avg_loss, meters.cer, meters.wer))
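The per-sample CER/WER above reduces to edit distance at character and word granularity. A minimal sketch with the editdistance package the example already uses:

import editdistance

def cer_wer(hyp, ref):
    """Character and word error rates (in percent) of a hypothesis
    against a reference transcript."""
    cer = editdistance.eval(hyp, ref) / max(len(ref), 1)
    wer = editdistance.eval(hyp.split(), ref.split()) / max(len(ref.split()), 1)
    return 100.0 * cer, 100.0 * wer

print(cer_wer('the cat sat', 'the cat sits'))  # 2 char edits, 1 of 3 words wrong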
Example #28
def main(args):
    # set up project directories
    tb_logdir, snapshot_dir = prepare_dirs(args)
    # get logger
    logger = logging.getLogger('train')
    # tensorboard writer
    writer = get_writer(args, tb_logdir)

    use_cuda = torch.cuda.is_available() and args.cuda

    # set manual seed if required
    if args.seed is not None:
        torch.manual_seed(args.seed)
        if use_cuda:
            torch.cuda.manual_seed_all(args.seed)

    # check for cuda supports
    if use_cuda:
        device = torch.device("cuda:0")
        cudnn.benchmark = True
    else:
        device = torch.device("cpu")

    # snapshot frequency
    if args.snapshot_every > 0 and not args.evaluate:
        logger.info('Saving snapshots to {}'.format(snapshot_dir))

    # load model
    classes, in_channels = data_loader.num_classes(args.dataset)
    if args.subsample_classes > 0:
        classes = args.subsample_classes
    net = models.load_model(args.arch, classes=classes)

    logger.info('Creating model {}.'.format(args.arch))

    if torch.cuda.device_count() > 1:
        logger.info("Running on {} GPUs".format(torch.cuda.device_count()))
        net.features = torch.nn.DataParallel(net.features)

    # move net to device
    net = net.to(device=device)

    # get data loader for the specified dataset
    train_loader, test_loader, val_loader = data_loader.load_dataset(
        args.dataset,
        args.data_path,
        args.batch_size,
        shuffle=args.shuffle,
        augmentation=args.augmentation,
        noise=args.noise,
        split=args.split,
        num_workers=args.workers,
        split_seed=args.split_seed,
        noise_seed=args.noise_seed,
        stratified=args.stratified,
        nclasses=args.subsample_classes,
        class_sample_seed=args.class_sample_seed,
        no_normalization=args.unnormalize,
        upscale=args.upscale,
        upscale_padding=args.upscale_padding)
    # define loss
    criterion = nn.CrossEntropyLoss().to(device)

    start_epoch = args.start_epoch
    best_acc1, best_acc5 = 0, 0
    # load model from file
    if os.path.isfile(args.resume_from):
        # resume training given state dictionary
        optimizer, scheduler = load_optimizer(args, net)
        try:
            net, optimizer, scheduler, start_epoch, best_acc1, best_acc5 = snapshot.load_snapshot(
                net, optimizer, scheduler, args.resume_from, device)
            if args.override:
                override_hyperparams(args, optimizer, scheduler)
        except KeyError:
            classes, in_channels = data_loader.num_classes(args.dataset)
            if args.subsample_classes > 0:
                classes = args.subsample_classes
            net = snapshot.load_model(args.arch, classes, args.resume_from,
                                      device, in_channels)

    else:
        # define optimizer
        optimizer, scheduler = load_optimizer(args, net)

    # evaluate model
    if args.evaluate:
        val_loss, top1_acc, top5_acc = scores.evaluate(net,
                                                       test_loader,
                                                       criterion,
                                                       device,
                                                       topk=(1, 5))
        utils.print_val_loss(args.epochs, val_loss, top1_acc, top5_acc)
        writer.add_scalar('Loss/test', val_loss, args.epochs)
        writer.add_scalar('Accuracy/test/top1', top1_acc, args.epochs)
        writer.add_scalar('Accuracy/test/top5', top5_acc, args.epochs)
        writer.close()
        return

    if args.evaluate_train:
        train_loss, top1_acc, top5_acc = scores.evaluate(net,
                                                         train_loader,
                                                         criterion,
                                                         device,
                                                         topk=(1, 5))
        utils.print_train_loss_epoch(args.epochs, train_loss, top1_acc,
                                     top5_acc)
        if best_acc1 * best_acc5 > 0:
            # if nonzero, print best val accuracy
            utils.print_val_loss(args.epochs, -1., best_acc1, best_acc5)
        writer.add_scalar('Loss/train', train_loss, args.epochs)
        writer.add_scalar('Accuracy/train/top1', top1_acc, args.epochs)
        writer.add_scalar('Accuracy/train/top5', top5_acc, args.epochs)
        writer.close()
        return

    if args.eval_regularization_loss:
        regularization_loss = scores.compute_regularization_loss(
            net, args.weight_decay)
        utils.print_regularization_loss_epoch(args.epochs, regularization_loss)
        writer.add_scalar('Regularization loss', regularization_loss,
                          args.epochs)
        writer.close()
        return

    utils.print_model_config(args)

    if start_epoch == 0:
        filename = args.arch + '_init_' + str(start_epoch) + '.pt'
        logger.info("Saving model initialization to {}".format(filename))
        snapshot.save_model(net, filename, snapshot_dir)

    # train the model
    net.train()
    if val_loader is None and test_loader is not None:
        val_loader = test_loader
        logger.warning("Using TEST set to validate model during training!")
    net, converged = train(net,
                           args.epochs,
                           train_loader,
                           optimizer,
                           criterion,
                           scheduler,
                           device,
                           snapshot_dirname=snapshot_dir,
                           start_epoch=start_epoch,
                           snapshot_every=args.snapshot_every,
                           val_loader=val_loader,
                           kill_plateaus=args.kill_plateaus,
                           best_acc1=best_acc1,
                           writer=writer,
                           snapshot_all_until=args.snapshot_all_until,
                           filename=args.arch,
                           train_acc=args.train_acc)
    if test_loader is not None:
        val_loss, top1_acc, top5_acc = scores.evaluate(net,
                                                       test_loader,
                                                       criterion,
                                                       device,
                                                       topk=(1, 5))
        utils.print_val_loss(args.epochs, val_loss, top1_acc, top5_acc)
        net = net.train()
        writer.add_scalar('Loss/test', val_loss, args.epochs)
        writer.add_scalar('Accuracy/test/top1', top1_acc, args.epochs)
        writer.add_scalar('Accuracy/test/top5', top5_acc, args.epochs)

    # save final model
    if converged:
        filename = args.arch + '_' + str(args.epochs) + '.pt'
        snapshot.save_model(net, filename, snapshot_dir)

    writer.close()
Example #29
# NOTE: this snippet is truncated at the source; the guard below is a
# reconstructed placeholder so the `else` branch that follows stays valid Python.
if args.method == 'base':
    trainloader, valloader = load_dataset(args.dataset,
                                          args.dataroot,
                                          batch_size=args.batch_size)
else:
    trainloader, valloader = load_dataset(args.dataset,
                                          args.dataroot,
                                          'pair',
                                          batch_size=args.batch_size)

num_class = trainloader.dataset.num_classes
print('Train dataset size:', len(trainloader.dataset))
print('Validation dataset size:', len(valloader.dataset))

# Model
print('==> Building model: {}'.format(args.model))

net = models.load_model(args.model, num_class)
# print(net)

if use_cuda:
    torch.cuda.set_device(args.sgpu)
    net.cuda()
    print(torch.cuda.device_count())
    print('Using CUDA..')

if args.ngpu > 1:
    net = torch.nn.DataParallel(net,
                                device_ids=list(
                                    range(args.sgpu, args.sgpu + args.ngpu)))

optimizer = optim.SGD(net.parameters(),
                      lr=args.lr,
                      momentum=0.9)  # NOTE: remaining arguments truncated in the source; momentum=0.9 is a typical completion
Example #30
    def add_model(self, model=None, path=None, identify_particles=None):
        """add another model in the current one"""

        self.new_external = []
        if path:
            model = ufomodels.load_model(path)

        if not model:
            raise USRMODERROR('Need a valid Model')
        else:
            path = model.__path__[0]
        # Check the validity of the model: reject UFO models older than UFO 1.0.
        if not hasattr(model, 'all_orders'):
            raise USRMODERROR("Add-on model doesn't follow the UFO convention (no couplings_order information)\n"
                              "MG5 is able to load such a model but NOT to use the add-model feature.")
        if isinstance(model.all_particles[0].mass, str):
            raise USRMODERROR("Add-on model doesn't follow the UFO convention (Mass/Width of particles are string names, not objects)\n"
                              "MG5 is able to load such a model but NOT to use the add-model feature.")

        for order in model.all_orders:
            if hasattr(order, 'perturbative_expansion') and order.perturbative_expansion:
                raise USRMODERROR('Add-on model cannot be a loop model.')

        for order in model.all_orders:
            self.add_coupling_order(order)

        # Automatically add identification for the anti-particles if needed,
        # and build identify_pid, which keeps track of the identified pdg_codes.
        identify_pid = {}
        if identify_particles:
            for new, old in identify_particles.items():
                new_part = next(
                    (p for p in model.all_particles if p.name == new), None)
                old_part = next((p for p in self.particles if p.name == old),
                                None)
                if new_part is None:
                    raise USRMODERROR("particle %s not in added model" % new)
                if old_part is None:
                    raise USRMODERROR("particle %s not in original model" % old)
                # Only dereference pdg_code once both lookups have succeeded.
                identify_pid[new_part.pdg_code] = old_part.pdg_code
                if new_part.antiname not in identify_particles:
                    new_anti = new_part.antiname
                    old_anti = old_part.antiname
                    misc.sprint(old, new, new_anti, old_anti,
                                old_part.antiname)
                    if old_anti == old:
                        raise USRMODERROR('failed identification (one particle is self-conjugate and the other is not)')
                    logger.info(
                        "adding identification for anti-particle: %s=%s" %
                        (new_anti, old_anti))
                    identify_particles[new_anti] = old_anti

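        # Merge the add-on model into the current one, component by component.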
        for parameter in model.all_parameters:
            self.add_parameter(parameter, identify_pid)
        for coupling in model.all_couplings:
            self.add_coupling(coupling)
        for lorentz in model.all_lorentz:
            self.add_lorentz(lorentz)
        for particle in model.all_particles:
            if particle.name in identify_particles:
                self.add_particle(particle,
                                  identify=identify_particles[particle.name])
            else:
                self.add_particle(particle)
        for vertex in model.all_vertices:
            self.add_interaction(vertex)

        self.all_path.append(path)

        return
Example #31
def test_net():
    ''' Evaluate the network '''
    # Make result directory and the result file.
    cfg.TRAIN.RANDOM_NUM_VIEWS = False
    result_dir = os.path.join(cfg.DIR.OUT_PATH, cfg.TEST.EXP_NAME)
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    result_fn = os.path.join(result_dir, 'result.mat')

    print("Exp file will be written to: " + result_fn)

    # Make a network and load weights
    NetworkClass = load_model(cfg.CONST.NETWORK_CLASS)
    print('Network definition: \n')
    print(inspect.getsource(NetworkClass.network_definition))
    net = NetworkClass(compute_grad=False)
    net.load(cfg.CONST.WEIGHTS)
    solver = Solver(net)

    # set constants
    batch_size = cfg.CONST.BATCH_SIZE

    # set up testing data process. We make only one prefetching process. The
    # process will return one batch at a time.
    queue = Queue(cfg.QUEUE_SIZE)
    data_pair = category_model_id_pair(dataset_portion=cfg.TEST.DATASET_PORTION)
    processes = make_data_processes(queue, data_pair, 1, repeat=True, train=False)
    num_data = len(processes[0].data_paths)
    num_batch = int(num_data / batch_size)
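    # only complete batches are evaluated; a trailing partial batch is dropped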

    # prepare result container
    results = {'cost': np.zeros(num_batch)}
    for thresh in cfg.TEST.VOXEL_THRESH:
        results[str(thresh)] = np.zeros((num_batch, batch_size, 5))
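    # each (batch, sample) slot stores the 5 values returned by evaluate_voxel_prediction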

    # Get all test data
    batch_idx = 0
    precisions = []
    for batch_img, batch_camera, batch_voxel in get_while_running(processes[0], queue):
        if batch_idx == num_batch:
            break

        pred, loss, activations = solver.test_output(
                batch_img, batch_camera, batch_voxel)

        for i, thresh in enumerate(cfg.TEST.VOXEL_THRESH):
            for j in range(batch_size):
                r = evaluate_voxel_prediction(pred[j, ...], batch_voxel[j, ...], thresh)
                results[str(thresh)][batch_idx, j, :] = r
                precisions.append(sklearn.metrics.average_precision_score(
                        batch_voxel[j, :, 1].flatten(), pred[j, :, 1].flatten()))

        # record result for the batch
        results['cost'][batch_idx] = float(loss)
        batch_idx += 1
        print('%d/%d, average loss: %f, average precision: %f' % (batch_idx,
                num_batch, results['cost'][:batch_idx].mean(),
                np.mean(precisions)))

    iou_now = results[str(cfg.TEST.VOXEL_THRESH[0])][:batch_idx].reshape(-1, 5)
    print('Total loss: %f' % np.mean(results['cost']))
    print('Total iou: %f' % (iou_now[:, 1] / iou_now[:, 2]).mean())
    print('Total AP: %f' % np.mean(precisions))
    sio.savemat(result_fn, results)
Example #32
        try:
            model = save_load_object.load_from_file(
                os.path.join(model_path, 'model.pkl'))
        except Exception as error:
            logger.info('Failed to load the model from the pickle file; importing the UFO from source instead.')
        else:
            # check that the cached pickle matches the current model on disk
            if model.get('version_tag') == os.path.realpath(model_path) + str(misc.get_pkg_info()):
                _import_once.append(model_path)
                return model

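    # Reaching this point twice means the pickle check above failed: the model changed on disk.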
    if model_path in _import_once:
        raise MadGraph5Error('This model has been modified on disk. To reload it you need to quit/relaunch mg5.')

    # Load basic information
    ufo_model = ufomodels.load_model(model_path)
    ufo2mg5_converter = UFOMG5Converter(ufo_model)
    model = ufo2mg5_converter.load_model()
    
    if model_path.endswith('/'):
        model_path = model_path[:-1]  # avoid an empty basename in os.path.split
    model.set('name', os.path.split(model_path)[-1])
    model.set('version_tag', os.path.realpath(model_path) + str(misc.get_pkg_info()))

    # Load the parameters/couplings in a convenient format.
    parameters, couplings = OrganizeModelExpression(ufo_model).main()
    model.set('parameters', parameters)
    model.set('couplings', couplings)
    model.set('functions', ufo_model.all_functions)
    
    # Cache the model in a pickle file to speed up future imports.
    save_load_object.save_to_file(os.path.join(model_path, 'model.pkl'), model)
Example #33
def validate(test_data_dir="./images",
             filename="mode.pt",
             network="ResNet50",
             gpu=None,
             cpu_count=os.cpu_count(),
             batch_size=16):
    classes = ImageFolder(test_data_dir).classes
    model = load_model(filename, network, len(classes))

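    # `gpu` is a comma-separated device list (e.g. "2,3"); evaluation runs on the first entry.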
    cuda_id = int(gpu.split(",")[0]) if gpu is not None else 0
    device = torch.device(
        "cuda:{}".format(cuda_id) if gpu is not None else "cpu")
    model = model.to(device)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    val_dataset = ImageFolder(
        test_data_dir,
        transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor(),
            normalize,
        ]))
    criterion = torch.nn.CrossEntropyLoss()

    val_loader = DataLoader(val_dataset,
                            shuffle=False,
                            num_workers=cpu_count,
                            batch_size=batch_size)
    # switch to evaluate mode
    model.eval()

    # setup running values
    running_loss = 0.0
    running_corrects = 0
    loss = 0.
    acc = 0.

    y_pred = []
    y_true = []
    conf = []

    total_seen_samples = 0
    with torch.no_grad():
        with trange(len(val_loader),
                    desc="Validating",
                    ncols=80,
                    postfix={"loss": 0, "acc": 0},
                    bar_format=("{desc}: {percentage:3.1f}% {bar} "
                                "{remaining} {n_fmt}/{total_fmt}{postfix}")
                    ) as pbar:
            start_time = perf_counter()
            for i, (inputs, labels) in enumerate(val_loader):
                inputs = inputs.to(device)
                current_batch_size = inputs.size(0)  # the last batch may be smaller
                total_seen_samples += current_batch_size
                labels = labels.to(device)

                # compute output
                output = model(inputs)
                preds = torch.argmax(output, 1)
                loss = criterion(output, labels)

                y_pred += preds.cpu().numpy().tolist()
                y_true += labels.cpu().numpy().tolist()
                conf += output.cpu().numpy().tolist()

                # statistics
                running_loss += loss.item()
                running_corrects += torch.sum(preds == labels.data)

                loss = running_loss / (i + 1)
                acc = running_corrects.double() / total_seen_samples

                pbar.set_postfix({
                    "loss": round(float(loss), 2),
                    "acc": round(float(acc), 3)
                })
                pbar.update()

            end_time = perf_counter()

    print("Loss: {:.4f}, Acc: {:.4f}, Time: {:.4f}s".format(
        loss, acc, end_time - start_time))

    return np.array(y_pred), np.array(y_true), np.array(conf)
Example #34
def main():
    config_path = Path(args.config_path)
    with open(config_path) as f:
        config = yaml.safe_load(f)

    net_config = config['Net']
    # loss_config = config['Loss']
    # opt_config = config['Optimizer']
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    n_class = net_config['n_class']
    val_dir = '../data'
    val_name = 'biwi_dataset_list.txt'
    val_type = 'BIWI'
    use_bined = False
    num_workers = 4

    pretrained_paths = [
        "/home/linhnv/projects/RankPose/model/headpose_resnet/model_epoch_77_8.775894704750192.pth"
    ]

    # models_path = glob("/home/linhnv/projects/RankPose/model/headpose_resnet/*")
    # models_path = [x for x in models_path if x.startswith("/home/linhnv/projects/RankPose/model/headpose_resnet/model_epoch")]
    # print(models_path)
    for pretrained_path in pretrained_paths:
        print(f"[INFO] Pretrained path: {pretrained_path}")

        model = load_model(**net_config)
        # To device
        model = model.to(device)

        modelname = config_path.stem
        output_dir = Path('../model') / modelname
        output_dir.mkdir(exist_ok=True)
        log_dir = Path('../logs') / modelname
        log_dir.mkdir(exist_ok=True)

        # logger = debug_logger(log_dir)
        # logger.debug(config)
        # logger.info(f'Device: {device}')

        params = model.parameters()

        valid_dataset = laod_dataset(data_type=val_type,
                                     split='valid',
                                     base_dir=val_dir,
                                     filename=val_name,
                                     use_bined=False,
                                     n_class=n_class)

        # top_10 = len(train_dataset) // 10
        # top_30 = len(train_dataset) // 3.33
        # train_weights = [ 3 if idx<top_10 else 2 if idx<top_30 else 1 for idx in train_dataset.labels_sort_idx]
        # train_sample = WeightedRandomSampler(train_weights, num_samples=len(train_dataset), replacement=True)

        # train_loader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sample, num_workers=num_workers,
        #                           pin_memory=True, drop_last=True)
        valid_loader = DataLoader(valid_dataset,
                                  batch_size=32,
                                  shuffle=False,
                                  num_workers=num_workers,
                                  pin_memory=True)

        if torch.cuda.is_available():
            model = nn.DataParallel(model)

        # logger.info(f'Load pretrained from {pretrained_path}')
        param = torch.load(pretrained_path, map_location='cpu')
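        # Trainer checkpoints wrap the weights under 'state_dict'; plain state dicts load directly.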
        if "state_dict" in param:
            model.load_state_dict(param['state_dict'], strict=False)
        else:
            model.load_state_dict(param)
        del param

        valid_losses = []
        valid_diffs = []
        model.eval()
        with torch.no_grad():
            with tqdm(valid_loader) as _tqdm:
                for batched in _tqdm:
                    if use_bined:
                        images, labels, yaw_labels, pitch_labels, roll_labels = batched

                        images, labels = images.to(device), labels.to(device)
                        # yaw_labels, pitch_labels, roll_labels = yaw_labels.to(device), pitch_labels.to(device), roll_labels.to(device)

                        preds, y_pres, p_pres, r_pres = model(
                            images, use_bined)

                        # loss = loss_fn([preds, y_pres, p_pres, r_pres], [labels, yaw_labels, pitch_labels, roll_labels])

                        diff = calculate_diff(preds, labels)
                    else:
                        images, labels = batched

                        images, labels = images.to(device), labels.to(device)

                        preds = model(images, use_bined)

                        # loss = loss_fn([preds], [labels])

                        diff = calculate_diff(preds, labels)

                    _tqdm.set_postfix(OrderedDict(mae=f'{diff:.2f}'))
                    # _tqdm.set_postfix(OrderedDict(loss=f'{loss.item():.3f}', d_y=f'{np.mean(diff[:,0]):.1f}', d_p=f'{np.mean(diff[:,1]):.1f}', d_r=f'{np.mean(diff[:,2]):.1f}'))
                    valid_losses.append(0)
                    valid_diffs.append(diff)

        valid_loss = np.mean(valid_losses)
        valid_diff = np.mean(valid_diffs)
        print(f'valid diff: {valid_diff}')
Example #35
def test_net():
    ''' Evaluate the network '''
    # Make result directory and the result file.
    result_dir = os.path.join(cfg.DIR.OUT_PATH, cfg.TEST.EXP_NAME)
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    result_fn = os.path.join(result_dir, 'result.mat')

    print("Exp file will be written to: " + result_fn)

    # Make a network and load weights
    NetworkClass = load_model(cfg.CONST.NETWORK_CLASS)

    #print('Network definition: \n')
    #print(inspect.getsource(NetworkClass.network_definition))

    net = NetworkClass()
    net.cuda()

    solver = Solver(net)
    solver.load(cfg.CONST.WEIGHTS)

    # set constants
    batch_size = cfg.CONST.BATCH_SIZE

    # set up testing data process. We make only one prefetching process. The
    # process will return one batch at a time.
    queue = Queue(cfg.QUEUE_SIZE)
    data_pair = category_model_id_pair(dataset_portion=cfg.TEST.DATASET_PORTION)
    processes = make_data_processes(queue, data_pair, 1, repeat=False, train=False)

    num_data = len(processes[0].data_paths)
    num_batch = int(num_data / batch_size)

    # prepare result container
    results = {'cost': np.zeros(num_batch),
               'mAP': np.zeros((num_batch, batch_size))}
    # Save results for various thresholds
    for thresh in cfg.TEST.VOXEL_THRESH:
        results[str(thresh)] = np.zeros((num_batch, batch_size, 5))

    # Get all test data
    batch_idx = 0
    for batch_img, batch_voxel in get_while_running(processes[0], queue):
        if batch_idx == num_batch:
            break

        # activations is a list of torch.cuda.FloatTensor
        pred, loss, activations = solver.test_output(batch_img, batch_voxel)

        # convert the PyTorch tensors to NumPy arrays
        pred = pred.data.cpu().numpy()
        loss = loss.data.cpu().numpy()

        for j in range(batch_size):
            # Save IoU per thresh
            for i, thresh in enumerate(cfg.TEST.VOXEL_THRESH):
                r = evaluate_voxel_prediction(pred[j, ...], batch_voxel[j, ...], thresh)
                results[str(thresh)][batch_idx, j, :] = r

            # Compute AP
            precision = sklearn.metrics.average_precision_score(
                batch_voxel[j, 1].flatten(), pred[j, 1].flatten())

            results['mAP'][batch_idx, j] = precision

        # record result for the batch
        results['cost'][batch_idx] = float(loss)
        print('%d/%d, costs: %f, mAP: %f' %
                (batch_idx, num_batch, loss, np.mean(results['mAP'][batch_idx])))
        batch_idx += 1


    print('Total loss: %f' % np.mean(results['cost']))
    print('Total mAP: %f' % np.mean(results['mAP']))

    sio.savemat(result_fn, results)
Example #36
def main():
  config = get_config()
  if config.resume:
    with open(config.resume + '/config.json', 'r') as f:
      json_config = json.load(f)
    json_config['resume'] = config.resume
    config = edict(json_config)

  if config.is_cuda and not torch.cuda.is_available():
    raise Exception("No GPU found")
  device = get_torch_device(config.is_cuda)

  logging.info('===> Configurations')
  dconfig = vars(config)
  for k in dconfig:
    logging.info('    {}: {}'.format(k, dconfig[k]))

  DatasetClass = load_dataset(config.dataset)
  if config.test_original_pointcloud:
    if not DatasetClass.IS_FULL_POINTCLOUD_EVAL:
      raise ValueError('This dataset does not support full pointcloud evaluation.')

  if config.evaluate_original_pointcloud:
    if not config.return_transformation:
      raise ValueError('Pointcloud evaluation requires config.return_transformation=true.')

  if (config.return_transformation ^ config.evaluate_original_pointcloud):
    raise ValueError('Rotation evaluation requires config.evaluate_original_pointcloud=true and '
                     'config.return_transformation=true.')

  logging.info('===> Initializing dataloader')
  if config.is_train:
    train_data_loader = initialize_data_loader(
        DatasetClass,
        config,
        phase=config.train_phase,
        num_workers=config.num_workers,
        augment_data=True,
        shuffle=True,
        repeat=True,
        batch_size=config.batch_size,
        limit_numpoints=config.train_limit_numpoints)

    val_data_loader = initialize_data_loader(
        DatasetClass,
        config,
        num_workers=config.num_val_workers,
        phase=config.val_phase,
        augment_data=False,
        shuffle=True,
        repeat=False,
        batch_size=config.val_batch_size,
        limit_numpoints=False)
    if train_data_loader.dataset.NUM_IN_CHANNEL is not None:
      num_in_channel = train_data_loader.dataset.NUM_IN_CHANNEL
    else:
      num_in_channel = 3  # RGB color

    num_labels = train_data_loader.dataset.NUM_LABELS
  else:
    test_data_loader = initialize_data_loader(
        DatasetClass,
        config,
        num_workers=config.num_workers,
        phase=config.test_phase,
        augment_data=False,
        shuffle=False,
        repeat=False,
        batch_size=config.test_batch_size,
        limit_numpoints=False)
    if test_data_loader.dataset.NUM_IN_CHANNEL is not None:
      num_in_channel = test_data_loader.dataset.NUM_IN_CHANNEL
    else:
      num_in_channel = 3  # RGB color

    num_labels = test_data_loader.dataset.NUM_LABELS

  logging.info('===> Building model')
  NetClass = load_model(config.model)
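  # The resolved class is instantiated directly, or through a wrapper when one is configured.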
  if config.wrapper_type == 'None':
    model = NetClass(num_in_channel, num_labels, config)
    logging.info('===> Number of trainable parameters: {}: {}'.format(NetClass.__name__,
                                                                      count_parameters(model)))
  else:
    wrapper = load_wrapper(config.wrapper_type)
    model = wrapper(NetClass, num_in_channel, num_labels, config)
    logging.info('===> Number of trainable parameters: {}: {}'.format(
        wrapper.__name__ + NetClass.__name__, count_parameters(model)))

  logging.info(model)
  model = model.to(device)

  if config.weights == 'modelzoo':  # Load modelzoo weights if possible.
    logging.info('===> Loading modelzoo weights')
    model.preload_modelzoo()

  # Load weights if specified by the parameter.
  elif config.weights.lower() != 'none':
    logging.info('===> Loading weights: ' + config.weights)
    state = torch.load(config.weights)
    if config.weights_for_inner_model:
      model.model.load_state_dict(state['state_dict'])
    else:
      if config.lenient_weight_loading:
        matched_weights = load_state_with_same_shape(model, state['state_dict'])
        model_dict = model.state_dict()
        model_dict.update(matched_weights)
        model.load_state_dict(model_dict)
      else:
        model.load_state_dict(state['state_dict'])

  if config.is_train:
    train(model, train_data_loader, val_data_loader, config)
  else:
    test(model, test_data_loader, config)
Example #37
import cv2

from pcn import pcn_detect
from models import load_model
from utils import draw_face



if __name__ == '__main__':
    # network detection
    nets = load_model()
    cam = cv2.VideoCapture(0)
    while cam.isOpened():
        ret, img = cam.read()
        if not ret:  # frame grab failed or the stream ended
            break
        faces = pcn_detect(img, nets)
        for face in faces:
            draw_face(img, face)
        cv2.imshow('PCN', img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cam.release()
    cv2.destroyAllWindows()
Example #38
def inference(model, output, csv, imgpath, valid):
    np.random.seed(0)
    test_set_full = pd.read_csv(csv)
    list_ids = list(test_set_full["image"].values)
    list_masks = list(test_set_full["mask"].values)
    # get root directories
    base_img_path = imgpath
    img_path = os.path.join(base_img_path, "images")
    mask_path = os.path.join(base_img_path, "masks")

    labels = dict(zip(list_ids, list_masks))

    if valid:
        ground_truth_csv = pd.read_csv("single_instance_dataset_wradius.csv")

    mask_output_path = os.path.join(output, model, "prediction_masks")
    if not os.path.exists(mask_output_path):
        os.makedirs(mask_output_path)

    valid_generator = utils.DataGeneratorMobileNetKeras(
        batch_size=1,
        img_path=img_path,
        labels=labels,
        list_IDs=list_ids,
        n_channels=3,
        n_channels_label=1,
        shuffle=False,
        mask_path=mask_path,
        augmentation=False,
    )

    model_loaded = models.load_model(
        os.path.join(".", "output", "models", model))
    prediction = model_loaded.predict_generator(generator=valid_generator,
                                                use_multiprocessing=True,
                                                verbose=True)

    threshold_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    test_images = list_ids

    valid_list = list()
    nan_keys = [
        "component_x",
        "component_y",
        "gt_area",
        "pixel_distance",
        "norm_distance",
        "prediction_area",
        "component_area",
        "component_relative_area",
        "component_intersection",
        "component_union",
        "component_iou",
        "component_pw_recall",
        "component_pw_precision",
    ]

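    # Sweep the binarization thresholds; each yields per-image masks and, if validating, per-component metrics.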
    for threshold in threshold_list:
        array_pred = np.copy(prediction)
        for i in np.arange(0, prediction.shape[0]):
            # get prediction and normalize
            pred = array_pred[i, :, :, 0]
            pred = (pred > threshold).astype(bool)
            cv2.imwrite(
                os.path.join(mask_output_path, str(threshold) + test_images[i]),
                pred.astype(np.uint8) * 255,
            )
            if valid:
                data = {}
                row = utils.get_sample_ground_truth(test_images[i],
                                                    ground_truth_csv)
                gt_center = np.ndarray([1, 2])
                gt_center[0, 0] = (row["x_center_resize"].values[0]) / 2
                gt_center[0, 1] = (row["y_center_resize"].values[0]) / 2
                diam_resize = (row["diam_resize"].values[0]) / 2

                ground_truth = utils.read_image_grayscale(
                    os.path.join(mask_path,
                                 "mask_" + test_images[i][:-3] + "png"))
                ground_truth = cv2.resize(ground_truth, (0, 0), fx=0.5, fy=0.5)
                ground_truth = (ground_truth > 0).astype(int)

                (
                    num_labels,
                    labeled_img,
                    centers,
                    iou_array,
                ) = connected_components_with_threshold(
                    (pred).astype(np.uint8), 0, ground_truth)

                gt_area = np.sum(ground_truth)
                prediction_area = np.sum(pred)
                # compute the metrics from the connected-component analysis results

                if centers is not None:
                    for component_label in range(num_labels):
                        filtered_component = labeled_img == (component_label +
                                                             1)
                        component_center = mass_center(filtered_component)

                        pixel_distance = np.linalg.norm(
                            np.subtract(gt_center, component_center))

                        data["mask_name"] = test_images[i]
                        data["threshold"] = str(threshold)
                        data["mask_x"] = gt_center[0, 0]
                        data["mask_y"] = gt_center[0, 1]
                        # component_x
                        data["component_x"] = component_center[0]
                        # component_y
                        data["component_y"] = component_center[1]
                        data["pixel_distance"] = pixel_distance
                        data["norm_distance"] = pixel_distance / diam_resize
                        data["gt_area"] = gt_area
                        data["prediction_area"] = prediction_area
                        # component_area
                        component_area = np.sum(filtered_component)
                        data["component_area"] = component_area
                        # relative_area
                        data["component_relative_area"] = (component_area /
                                                           prediction_area)
                        # component pixelwise metrics
                        intersection = np.sum(
                            np.logical_and(filtered_component, ground_truth))
                        union = np.sum(
                            np.logical_or(filtered_component, ground_truth))
                        data["component_intersection"] = intersection
                        data["component_union"] = union
                        data["component_iou"] = intersection / union
                        data["component_pw_recall"] = intersection / gt_area
                        data["component_pw_precision"] = (intersection /
                                                          component_area)

                else:  # no buds detected; record the miss in the metrics dict
                    data["mask_name"] = test_images[i]
                    data["threshold"] = str(threshold)
                    data["mask_x"] = gt_center[0, 0]
                    data["mask_y"] = gt_center[0, 1]
                    # bulk-assign np.nan to all nan-able keys
                    data.update(dict.fromkeys(nan_keys, np.nan))

                valid_list.append(data)

    print(model + " images generated!")
    if valid:
        csv = pd.DataFrame(valid_list)
        csv.to_csv(os.path.join(output, model[:-3] + ".csv"))
Example #39
def get_recommendations(recommender_id, user, count):
    recommender = get_recommender(recommender_id)
    model = load_model(recommender["model"])
    count = count or recommender["default-recommendations"]
    return model.recommend(user=user, count=count)
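
# A minimal usage sketch (the recommender id and user below are hypothetical,
# not from the source): passing count=None falls back to the recommender's
# configured "default-recommendations".
top_items = get_recommendations("movie-recs", user="alice", count=None)
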
def main(args):

    print('Feature extractor training.')
    print('CONFIGURATION:\t{}'.format(args.config))
    with open(args.config) as json_config_file:
        config = utils.AttrDict(json.load(json_config_file))

    # Set up output directory
    experiment_name = generate_experiment_name(config)
    model_dir = os.path.join(os.path.expanduser(config.output.output_dir), experiment_name)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    print('Model saved at {}'.format(model_dir))

    config_filename = path_leaf(args.config)
    copyfile(args.config, os.path.join(model_dir, config_filename))

    # CUDA for PyTorch
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")

    source_loader = dataloaders.get_traindataloaders(config.source_dataset,
                                                     config)
    target_loader = dataloaders.get_traindataloaders(config.target_dataset,
                                                     config)
    evaluators_list = dataloaders.get_evaluators(config.evaluation_datasets,
                                                 config)

    # Set up training model
    print('Building training model')
    if config.model.checkpoint:
        checkpoint_path = config.model.checkpoint_path
    else:
        checkpoint_path = None
    model = models.load_model(config.model.model_arch,
                              device,
                              checkpoint_path=checkpoint_path,
                              embedding_size=config.model.embedding_size,
                              imgnet_pretrained=config.model.pretrained_imagenet)

    optimizer = optim.SGD(model.parameters(),
                          lr=config.hyperparameters.learning_rate,
                          momentum=0.9,
                          nesterov=True,
                          weight_decay=2e-4)

    scheduler = lr_scheduler.ExponentialLR(optimizer, config.hyperparameters.learning_rate_decay_factor)

    model = model.to(device)

    plotter = utils.VisdomPlotter(config.visdom.server,
                                  env_name=experiment_name,
                                  port=config.visdom.port)

    print('Fitting source dataset.')
    gmixture = clustering.distance_supervised_gaussian_mixture(source_loader,
                                                               model,
                                                               device,
                                                               _plotter=plotter,
                                                               name='Source Gaussians')

    print('Fitting target dataset.')
    clustering.update_gaussian_mixture(gmixture,
                                       target_loader,
                                       model,
                                       device,
                                       _plotter=plotter,
                                       name='Target Gaussians')

    print('DualTriplet loss training mode.')
    miner = miners.get_miner(config.miner,
                             config.hyperparameters.margin,
                             config.hyperparameters.people_per_batch,
                             plotter,
                             deadzone_ratio=config.hyperparameters.deadzone_ratio)
    miner.gmixture = gmixture
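    # Attach the fitted mixture so the miner can use its cluster assignments when selecting samples.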

    loss = losses.DualtripletLoss(config.hyperparameters.margin,
                                  config.hyperparameters.lamda,
                                  plotter)

    model_trainer = trainer.Dualtriplet_Trainer(model,
                                                miner,
                                                loss,
                                                optimizer,
                                                scheduler,
                                                device,
                                                plotter,
                                                config.hyperparameters.margin,
                                                config.model.embedding_size,
                                                batch_size=config.hyperparameters.batch_size)

    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Loop over epochs
    epoch = 0
    print('Training Launched.')
    while epoch < config.hyperparameters.n_epochs:

        # Validation
        for evaluator in evaluators_list:
            print('\nEvaluation on {}'.format(evaluator.test_name))
            evaluator.evaluate(model,
                               device,
                               plotter=plotter,
                               epoch=epoch)

        # Training
        print('\nExperimentation {}'.format(config.experiment))
        print('Train Epoch {}'.format(epoch))
        model_trainer.Train_Epoch(source_loader, target_loader, epoch)

        # Save model
        # if not (epoch + 1) % config.output.save_interval:
        #
        #     model_file_path = os.path.join(model_dir, 'model_{}.pth'.format(epoch))
        #     print('\nSave model at {}'.format(model_file_path))
        #     torch.save({'epoch': epoch,
        #                 'model_state_dict': utils.state_dict_to_cpu(model.state_dict()),
        #                 'optimizer_state_dict': optimizer.state_dict(),
        #                 'scheduler_state_dict': scheduler.state_dict(),
        #                 'embedding_size': config.model.embedding_size
        #                 }, model_file_path)

        epoch += 1

    model_file_path = os.path.join(model_dir, 'model_{}.pth'.format(epoch))
    print('\nSave model at {}'.format(model_file_path))
    torch.save({'epoch': epoch,
                'model_state_dict': utils.state_dict_to_cpu(model.state_dict()),
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
                'embedding_size': config.model.embedding_size
                }, model_file_path)
    print('Finished.')