Example #1
0
        else:
            # make dummy random data just for testing model inits
            train = RandomEMDataIterator(name='train')
            test = RandomEMDataIterator(name='test')

        if not args.model_file:
            # create the model based on the architecture specified via command line
            arch = EMModelArchitecture.init_model_arch(
                args.model_arch, train.parser.nclass,
                not train.parser.independent_labels)
            model = Model(layers=arch.layers)

            # allocate saving counts for training labels here so they can be saved with convnet checkpoints
            model.batch_meta = {
                'prior_train_count':
                np.zeros((train.parser.nclass, ), dtype=np.int64),
                'prior_total_count':
                np.zeros((1, ), dtype=np.int64)
            }

        if hasattr(model, 'batch_meta'):
            train.parser.batch_meta['prior_train_count'] = model.batch_meta[
                'prior_train_count']
            train.parser.batch_meta['prior_total_count'] = model.batch_meta[
                'prior_total_count']

        assert (train.nmacrobatches > 0
                )  # no training batches specified and not in write_output mode
        macro_epoch = model.epoch_index // train.nmacrobatches + 1
        macro_batch = model.epoch_index % train.nmacrobatches + 1
        if args.data_config and macro_batch > train.batch_range[0]:
            print(
Example #2
0
File: emneon.py  Project: elhuhdron/emdrp
                                  dim_ordering=args.dim_ordering, batch_range=args.test_range, name='test', 
                                  isTest=True, concatenate_batches=True, NBUF=args.nbebuf,
                                  image_in_size=args.image_in_size) if args.callback_args['eval_freq'] else None
        else:
            # make dummy random data just for testing model inits
            train = RandomEMDataIterator(name='train')
            test = RandomEMDataIterator(name='test')
    
        if not args.model_file:
            # create the model based on the architecture specified via command line
            arch = EMModelArchitecture.init_model_arch(args.model_arch, train.parser.nclass, 
                                                       not train.parser.independent_labels)
            model = Model(layers=arch.layers)
            
            # allocate saving counts for training labels here so they can be saved with convnet checkpoints
            model.batch_meta = {'prior_train_count':np.zeros((train.parser.nclass,),dtype=np.int64), 
                                'prior_total_count':np.zeros((1,),dtype=np.int64)}

        if hasattr(model,'batch_meta'):
            train.parser.batch_meta['prior_train_count'] = model.batch_meta['prior_train_count']
            train.parser.batch_meta['prior_total_count'] = model.batch_meta['prior_total_count']
    
        assert( train.nmacrobatches > 0 )    # no training batches specified and not in write_output mode
        macro_epoch = model.epoch_index//train.nmacrobatches+1
        macro_batch = model.epoch_index%train.nmacrobatches+1
        if args.data_config and macro_batch > train.batch_range[0]:
            print('Model loaded at model epoch %d, setting to training batch %d' % (model.epoch_index,macro_batch,))
            train.reset_batchnum(macro_batch)
        
        # print out epoch and batch as they were in cuda-convnets2, starting at 1
        print('Training from epoch %d to %d with %d/%d training/testing batches per epoch, %d examples/batch' \
            % (macro_epoch, args.epochs, train.nmacrobatches, test.nmacrobatches if test else 0,