Example #1
def load_net(fname):
    cudaconv_net = IGPUModel.load_checkpoint(fname)
    layers = cudaconv_net["model_state"]["layers"]
    
    #Note, data dimensions are hardcoded here - not sure we have that info in the cudaconv object?
    decafnet = translator.translate_cuda_network(layers, {'data': (32, 32, 3)})
    return decafnet
Example #2
def load_net(fname):
    cudaconv_net = IGPUModel.load_checkpoint(fname)
    layers = cudaconv_net["model_state"]["layers"]

    #Note, data dimensions are hardcoded here - not sure we have that info in the cudaconv object?
    decafnet = translator.translate_cuda_network(layers, {'data': (32, 32, 3)})
    return decafnet
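The (32, 32, 3) input shape above is hardcoded, but the checkpoint does carry that information. A minimal sketch (not from either project) of reading the input geometry from the first layer that consumes the data layer, following the pattern of Examples #4 and #8 below:

from math import sqrt

def load_net(fname):
    cudaconv_net = IGPUModel.load_checkpoint(fname)
    layers = cudaconv_net["model_state"]["layers"]
    # Find the data layer and its first consumer, which stores the input geometry.
    data_layer = [l for l in layers if l["name"] == "data"][0]
    consumer = [l for l in layers if data_layer in l.get("inputLayers", [])][0]
    dim = int(sqrt(consumer["imgPixels"][0]))  # width/height of the square input
    channels = consumer["channels"][0]         # number of input channels
    return translator.translate_cuda_network(layers, {"data": (dim, dim, channels)})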
Example #3
    def __init__(self, params):
        # import only necessary for ConvNet
        from gpumodel import IGPUModel
        from shownet import ShowConvNet
        import options

        load_dic = IGPUModel.load_checkpoint(params['mdl_file'][0])
        op = load_dic['op']
        op2 = ShowConvNet.get_options_parser()
        op.merge_from(op2)
        op.eval_expr_defaults()
        op.set_value('load_file', params['mdl_file'][0])
        op.set_value('gpu', [params['gpu']], False)
        op.set_value('minibatch_size', params['batch'], False)
        op.set_value('write_features', params['layer'], False)
        #for k in op.options.keys():
        #    print k, op.options[k].value

        self.model = ShowConvNet(op, load_dic)
        layers = [_['name'] for _ in self.model.layers]
        params['layer_idx'] = layers.index(params['layer'])
        params['output_size'] = params['input_size'] - params['border_size']*2
        self.params = params

        #initial dummy running
        indata = np.zeros((self.params['input_size']**2, self.params['batch']), dtype=np.float32)
        outdata = np.zeros((self.params['output_size']**2, self.params['batch']), dtype=np.float32)
        data = [indata, outdata]
        ftrs = np.zeros((self.params['batch'], self.params['output_size']**2), dtype=np.float32)
        self.model.do_write_one_feature(data, ftrs, self.params['layer_idx'])
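After the dummy warm-up above, real batches go through the same do_write_one_feature call with identically shaped buffers. A hypothetical follow-up (the FeatureExtractor name and the random input are placeholders, not from the original snippet):

# Hypothetical usage of the class whose __init__ is shown above.
extractor = FeatureExtractor(params)
patch = np.random.rand(params['input_size'] ** 2, params['batch']).astype(np.float32)
dummy_out = np.zeros((params['output_size'] ** 2, params['batch']), dtype=np.float32)
ftrs = np.zeros((params['batch'], params['output_size'] ** 2), dtype=np.float32)
extractor.model.do_write_one_feature([patch, dummy_out], ftrs, params['layer_idx'])
# ftrs now holds the requested layer's output for each of the params['batch'] patches.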
Example #4
def load_from_convnet(filename):
    cudaconv_net = IGPUModel.load_checkpoint(filename)
    layers = cudaconv_net["model_state"]["layers"]
    data_layer = [l for l in layers if l["name"] == "data"][0]
    data_consumer = [l for l in layers if data_layer in l.get("inputLayers", [])][0]
    input_dim = int(sqrt(data_consumer["imgPixels"][0]))  # The width/height of the square images
    input_channels = data_consumer["channels"][0]  # The number of channels in the input images
    return translator.translate_cuda_network(layers, {"data": (input_dim, input_dim, input_channels)})
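Example #5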
    def plot_predictions(self, data_provider, output_file='/tmp/predictions.png', train=True, only_errors=True):
        op = shownet.ShowConvNet.get_options_parser()

        local_train = train
        predict_dict = {
            '--write-features': 'probs',
            '--feature-path': '/tmp/feature_path',
            '--test-range': '2',
            '--train-range': '1',
            '--show-preds': 'probs',
            '-f': self.last_model,
            '--data-provider': 'dp_scikit',
            '--multiview-test': 0,
            '--logreg-name': 'aaa'
        }

        op.parse_from_dictionary(predict_dict)
        load_dic = None
        options = op.options
        if options["load_file"].value_given:
            print 'load file option provided'
            load_dic = IGPUModel.load_checkpoint(options["load_file"].value)
            old_op = load_dic["op"]
            old_op.merge_from(op)
            op = old_op
        op.eval_expr_defaults()



        class MyConvNet(shownet.ShowConvNet):
            def get_data_dims(self, idx):
                return data_provider.get_data_dims(idx)

            def get_num_classes(self):
                return data_provider.get_num_classes()

            def get_next_batch(self, train=True):
                return data_provider.get_next_batch(local_train)

            def get_num_test_batches(self):
                return data_provider.get_num_test_batches()

            def get_plottable_data(self, data):
                return data_provider.get_plottable_data(data)

            def init_data_providers(self):
                data_provider.init_data_providers()

            def get_label_names(self):
                return data_provider.get_label_names()



        model = MyConvNet(op, load_dic=load_dic)
        model.only_errors = only_errors
        model.plot_predictions()
        pl.savefig(output_file)
        model.cleanup()
Example #6
def get_model():
    global _model
    if _model is None:
        # This code is adapted from gpumodel.py and shownet.py
        load_dic = IGPUModel.load_checkpoint(app.config["TRAINED_MODEL_PATH"])
        op = ShowConvNet.get_options_parser()
        old_op = load_dic["op"]
        old_op.merge_from(op)
        op = old_op
        _model = ShowConvNet(op, load_dic)
    return _model
Example #7
def get_model():
    global _model
    if _model is None:
        # This code is adapted from gpumodel.py and shownet.py
        load_dic = IGPUModel.load_checkpoint(app.config["TRAINED_MODEL_PATH"])
        op = ShowConvNet.get_options_parser()
        old_op = load_dic["op"]
        old_op.merge_from(op)
        op = old_op
        _model = ShowConvNet(op, load_dic)
    return _model
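The app.config lookup suggests a Flask application; a hypothetical route showing why the lazy singleton is useful (the /predict endpoint and its body are assumptions, not part of the original project):

from flask import Flask, jsonify

app = Flask(__name__)
app.config["TRAINED_MODEL_PATH"] = "/path/to/ConvNet__checkpoint"  # placeholder path

_model = None  # module-level cache read by get_model()

@app.route("/predict", methods=["POST"])
def predict():
    model = get_model()  # the checkpoint is loaded on the first request only, then reused
    # feature extraction / classification with the ShowConvNet instance would go here
    return jsonify({"model_loaded": model is not None})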
Example #8
def load_from_convnet(filename):
    cudaconv_net = IGPUModel.load_checkpoint(filename)
    layers = cudaconv_net["model_state"]["layers"]
    data_layer = [l for l in layers if l['name'] == 'data'][0]
    data_consumer = [l for l in layers if data_layer in l.get('inputLayers', [])][0]
    input_dim = int(sqrt(data_consumer['imgPixels'][0]))  # The width/height of the square images
    input_channels = data_consumer['channels'][0]  # The number of channels in the input images
    return translator.translate_cuda_network(layers, {'data': (input_dim, input_dim, input_channels)})
Example #9
def initBFrom(name, shape, params=None):
	assert(params is not None)
	assert(len(params) > 0)
	(checkPointFile, layerName) = params[0].split('.')

	net = IGPUModel.load_checkpoint(checkPointFile)
	layernames = [layer['name'] for layer in net['model_state']['layers']]
	if layerName not in layernames:
		raise initWError("There is no layer named '%s' in file '%s'" % (layerName, checkPointFile))
	else:
		weightlist = net['model_state']['layers'][layernames.index(layerName)]['biases']
		assert(len(weightlist) > 0)
		assert(weightlist.shape == shape)
		return weightlist
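A hedged usage sketch: params[0] is expected to have the form '<checkpoint-file>.<layer-name>' (the split('.') only works for dot-free checkpoint paths), and the file, layer name, and shape below are hypothetical:

# Initialise a new layer's biases from the saved biases of layer 'fc10';
# the shape argument must match the stored array exactly.
biases = initBFrom('fc10_new', shape=(1, 10), params=['ConvNet_checkpoint.fc10'])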
Example #10
    def __init__(self, model_path, data_processor, gpu, layers):
        op = ConvNetPredict.get_options_parser()
        op.set_value('load_file', model_path)
        op.set_value('gpu', str(gpu))

        load_dic = IGPUModel.load_checkpoint(model_path)
        old_op = load_dic["op"]
        old_op.merge_from(op)
        op = old_op
        op.eval_expr_defaults()

        ConvNet.__init__(self, op, load_dic)

        self.dp = data_processor
        self.ftr_layer_idx = map(self.get_layer_idx, layers)
Example #11
def initBFrom(name, shape, params=None):
    assert (params != None)
    assert (len(params) > 0)
    (checkPointFile, layerName) = params[0].split('.')

    net = IGPUModel.load_checkpoint(checkPointFile)
    layernames = [layer['name'] for layer in net['model_state']['layers']]
    if layerName not in layernames:
        raise initWError("There is no layer named '%s' in file '%s'" %
                         (layerName, checkPointFile))
    else:
        weightlist = net['model_state']['layers'][layernames.index(layerName)]['biases']
        assert (len(weightlist) > 0)
        assert (weightlist.shape == shape)
        return weightlist
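Example #12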
    def get_test_error(self):
        data, filenames = self.get_test_patches_and_filenames()
        if self.test_on_images:
            print 'testing on images'
            probabilities, test_results = self.get_predictions(data)

            fileProbs, fileLabels, fileIDs = ModelEvaluation.GetUnormalizedJointLogProbability(probabilities, data[1].reshape(-1), filenames)
            filePredictions = ModelEvaluation.GetPredictions(fileProbs)
            fileAccuracy, misclassifiedFiles = ModelEvaluation.CalculateAccuracy(filePredictions, fileLabels)

            nExamples = test_results[1]
            results = ({'logprob': [test_results[0]['logprob'][0], (1 - fileAccuracy) * test_results[1]]}, test_results[1])
            if self.test_only:  # Print the individual batch results for safety
                print str(results)
        else:
            print 'not testing on images'
            self.libmodel.startBatch(data, True)
            results = self.finish_batch()
        self.regular_test_outputs += [IGPUModel.get_test_error(self)]
        return results
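Example #13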
Transition_matrix = dic['Transition_matrix']
Prior = dic['Prior']

outPred = './training/pred/'
####################################
##################load CNN here######
import getopt as opt
from gpumodel import IGPUModel
from options import *

op = ShowConvNet.get_options_parser()
op.options['load_file'].value = r'.\tmp\tmp\ConvNet__2014-05-14_21.42.41'
op.options['feature_path'].value = r'.\prediction_feature'
op.options['test_batch_range'].value = 1
op.options['write_features'].value = 'probs'
load_dic = IGPUModel.load_checkpoint(op.options["load_file"].value)
old_op = load_dic["op"]
old_op.merge_from(op)
op = old_op
op.eval_expr_defaults()
#op, load_dic = IGPUModel.parse_options(op)
op.options['train_batch_range'].value = [1]
op.options['test_batch_range'].value = [1]
model = ShowConvNet(op, load_dic)
meta = pickle.load(open(r'.\storage\batches.meta'))
data_mean = meta['data_mean']

###############################
# load prestore template
ref_depth = numpy.load('distance_median.npy')
template = cv2.imread('template.png')
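Example #14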
                # result.append(item + ';' +model.ClassNameTest(item)+';'+ door + '\n')
                ground_truth = model.ClassNameTest(item)
                print ground_truth, door
                if not model.ClassNameTest(item) == '0':
                    P_num += 1

                if not door == ground_truth:
                    error += 1
        error_ratio = float(error)/i
        print error_ratio
        print i, P_num, len(result), error
        # result.append('error_ratio:'+str(error_ratio)+' Positive_num:'+str(P_num)+' total_num:'+str(i))
        # myreslut = sorted(result, key=lambda result:result[0])
        # if P_num<2000:
        #     my_result = file('myresult_p.txt', 'wb')
        # else:
        #     my_result = file('myresult_n.txt', 'wb')
        # my_result.writelines(myreslut)
        # my_result.close()
    except (UnpickleError, ShowNetError, opt.GetoptError), e:
        print "----------------"
        print "Error:"
        print e
    print 'finish_8'


op = ShowPredction.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowPredction(op, load_dic)
print os.path.exists("G:\\door_data_sampling\\posture\\data_pos\\test\\test_value_p\\")
show_predict_dir('G:\\door_data_sampling\\posture\\test\\org_data\\')
Example #15
if not os.path.exists(outPred):
    os.makedirs(outPred)   
####################################
##################load CNN here######
import getopt as opt
from gpumodel import IGPUModel
from options import *


op = ShowConvNet.get_options_parser()
#op.options['load_file'].value=r'.\ConvNet_3DCNN\tmp\ConvNet__2014-05-28_01.59.00'
### old
op.options['load_file'].value=r'I:\Kaggle_multimodal\StartingKit_track3\Final_project\ConvNet_3DCNN\tmp\ConvNet__2014-05-26_03.40.18'

op.options['write_features'].value = 'probs'
load_dic = IGPUModel.load_checkpoint(op.options["load_file"].value)
old_op = load_dic["op"]
old_op.merge_from(op)
op = old_op
op.eval_expr_defaults()
op.options['train_batch_range'].value = [1]
op.options['test_batch_range'].value = [1]
op.options['data_path'].value = r'.\ConvNet_3DCNN\storage_sk_final'

model = ShowConvNet(op, load_dic)
model.crop_border = 0


meta = pickle.load(open(r'.\ConvNet_3DCNN\storage_sk_final\batches.meta'))
data_mean = meta['data_mean']
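Example #16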
    def fit(self, X, y, use_starting_point=True, **kwargs):
        print 'about to fit ConvNetLearn'
        if use_starting_point and self.last_model is not None:
            self.dict['-f']=self.last_model


        op = convnet.ConvNet.get_options_parser()
        op.parse_from_dictionary(self.dict)

        load_dic = None
        options = op.options
        if options["load_file"].value_given:
            print 'load file option provided'
            load_dic = IGPUModel.load_checkpoint(options["load_file"].value)

            name_to_weights = {}
            if self.init_states_models is not None:
                name_to_weights = {}
                for init_model in self.init_states_models:
                    load_dic_local = IGPUModel.load_checkpoint(init_model)

                    for k, v in load_dic_local['model_state'].iteritems():
                        if k == 'layers':
                            for elem in v:
                                name = elem.get('name')
                                weights =  elem.get('weights')
                                if weights is not None:
                                    print 'adding weights for layer {}'.format(name)
                                    if name not in name_to_weights:
                                        name_to_weights[name] = {}
                                    name_to_weights[name]['weights'] = weights
                                    name_to_weights[name]['biases'] = elem.get('biases')
                                    name_to_weights[name]['weightsInc'] = elem.get('weightsInc')
                                    name_to_weights[name]['biasesInc'] = elem.get('biasesInc')




            if len(name_to_weights) > 0:
                print 'layer names with init arrays: {}'.format(name_to_weights.keys())

                for k, v in load_dic['model_state'].iteritems():
                    if k == 'layers':
                        for elem in v:
                            name = elem.get('name')
                            print 'name of layer to possibly be updated {}'.format(name)
                            weights =  elem.get('weights')
                            if weights is not None:
                                if name in name_to_weights:
                                    print 'changing init point of model for layer {}'.format(name)
                                    coefs_name = name_to_weights.get(name)
                                    if coefs_name is None or 'weights' not in coefs_name:
                                        raise Exception('no init weights found for layer {}, available fields: {}'.format(name, coefs_name.keys() if coefs_name else None))
                                    elem['weights'] = coefs_name['weights']
                                    elem['biases'] = coefs_name['biases']
                                    elem['weightsInc'] = coefs_name['weightsInc']
                                    elem['biasesInc'] = coefs_name['biasesInc']




            old_op = load_dic["op"]
            old_op.merge_from(op)
            op = old_op
        op.eval_expr_defaults()


        try:
            self.dict.pop('-f')
        except:
            pass


        if hasattr(X, 'get_next_batch'):
            data_provider = X
        else:
            data_provider = InMemorySplitDataProvider(X,y,fraction_test=self.fraction_test)

        data_provider.init_data_providers()

        num_classes = data_provider.get_num_classes()
        logger.info('num classes {}'.format(num_classes))


        #we adjust the number of classes dynamically
        for name in self.mcp_layers.sections():
            if self.mcp_layers.has_option(name,'outputs'):
                if self.mcp_layers.get(name, 'outputs') == 'num_classes':
                    self.mcp_layers.set( name, 'outputs', value='{}'.format(num_classes))
        ##################


        class MyConvNet(convnet.ConvNet):

            def __init__(self, op, load_dic, mcp_layers, mcp_params, fraction_test):
                self.layer_def_dict = mcp_layers
                self.layer_params_dict = mcp_params
                convnet.ConvNet.__init__(self, op, load_dic=load_dic, initialize_from_file=False)
                self.test_one = True
                self.epoch = 1
                self.max_filesize_mb = 5000

            def get_data_dims(self, idx):
                return data_provider.get_data_dims(idx)

            def get_num_classes(self):
                return data_provider.get_num_classes()

            def get_next_batch(self, train=True):
                return data_provider.get_next_batch(train)

            def get_num_test_batches(self):
                return data_provider.get_num_test_batches()

            def init_data_providers(self):
                data_provider.init_data_providers()

        model = MyConvNet(op, load_dic=load_dic, mcp_layers=self.mcp_layers, mcp_params=self.mcp_params, fraction_test=self.fraction_test)

        self.last_model = join(self.output_folder, model.save_file)
        print 'last model name {}'.format(self.last_model)
        model.start()
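Example #17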
    def predict_proba(self, X, train=True):
        op = shownet.ShowConvNet.get_options_parser()

        predict_dict = {
            '--write-features': 'probs',
            '--feature-path': '/tmp/feature_path',
            '--test-range': '2',
            '--train-range': '1',
            '-f': self.last_model,
            '--data-provider': 'dp_scikit',
            '--show-preds': '',
            '--multiview-test': 0,
            '--logreg-name': 'aaa'
        }

        op.parse_from_dictionary(predict_dict)
        load_dic = None
        options = op.options
        if options["load_file"].value_given:
            print 'load file option provided'
            load_dic = IGPUModel.load_checkpoint(options["load_file"].value)
            old_op = load_dic["op"]
            old_op.merge_from(op)
            op = old_op
        op.eval_expr_defaults()

        if hasattr(X, 'get_next_batch'):
            data_provider = X
        else:
            data_provider = InMemorySplitDataProvider(X,None,fraction_test=0.0)

        data_provider.init_data_providers()

        class MyConvNet(shownet.ShowConvNet):
            def init_data_providers(self):
                self.dp_params['convnet'] = self

            def compute_probs(self, X):
                out = None

                while True:
                    data_all = data_provider.get_next_batch(train=train)
                    epoch, batch = data_all[0], data_all[1]
                    if epoch != 1:
                        break
                    print 'working on epoch: {}, batch: {}'.format(epoch, batch)
                    data = data_all[2]
                    if isinstance(data[0], list):
                        data_point = data[0][4]
                    else:
                        data_point = data[0].shape[1]
                    print 'data points {}'.format(data_point)
                    num_ftrs = self.layers[self.ftr_layer_idx]['outputs']

                    ftrs = np.zeros((data_point, num_ftrs), dtype=np.single)
                    self.libmodel.startFeatureWriter(data + [ftrs], self.ftr_layer_idx)

                    self.finish_batch()
                    if out is None:
                        out = ftrs
                    else:
                        out = np.vstack((out,ftrs))
                return out
        model = MyConvNet(op, load_dic=load_dic)
        probs = model.compute_probs(data_provider)
        model.cleanup()
        return probs
Example #18
                      "Queue key")
        op.add_option("ensemble-id", "ensemble_id", IntegerOptionParser,
                      "Id of predict ensemble")
        op.add_option("iteration-id", "iteration_id", IntegerOptionParser,
                      "Id of predict iteration")
        op.add_option("data-dir", "data_dir", StringOptionParser,
                      "Data directory")
        op.add_option("is-dataset",
                      "is_dataset",
                      BooleanOptionParser,
                      "Format output as file and upload on s3",
                      default=False)

        op.options['load_file'].default = None
        return op


if __name__ == "__main__":
    try:
        op = ShowConvNet.get_options_parser()
        op, load_dic, batch_meta = IGPUModel.parse_options(op)
        model = ShowConvNet(op, load_dic, {
            'batch_meta': batch_meta,
            'data_dir': op.options['data_dir'].value
        })
        model.start()
    except (UnpickleError, ShowNetError, opt.GetoptError), e:
        print "----------------"
        print "Error:"
        print e