Example #1
import json

from json_tricks.np import dump


# filename, batch_size, classes, epochs, learning_rate, decay, momentum,
# nesterov, train_datapoints and test_datapoints are module-level globals here
def output_stuff(model, history):

	#parameters
	params={ #dictionary
		'filename' : filename,
		'batch_size' : batch_size,
		'classes' : classes,
		'epochs' : epochs,
		'learning_rate' : learning_rate,
		'decay' : decay,
		'momentum' : momentum,
		'nesterov' : nesterov,
		'train_datapoints' : train_datapoints,
		'test_datapoints' : test_datapoints,
		}
	with open(filename+'_params.json', 'w') as fp:
		json.dump(params, fp)

	#architecture, for keras and nengo loading respectively
	json_string = model.to_json()
	with open(filename+'_model.json', 'w') as model_file:
		model_file.write(json_string)


	with open(filename+"_arch.json","w") as datafile:
		dump(arch_dict, datafile)

	# weights
	# model.save_weights(filename+'_weights.h5')

	#history
	with open(filename+"_history.txt", "w") as history_file:
		history_file.write(str(history.history))
Example #2
from copy import deepcopy
from os.path import join
from tempfile import mkdtemp

from json_tricks.np import dump, load


def test_file_numpy():
    path = join(mkdtemp(), 'pytest-np.json')
    with open(path, 'wb+') as fh:
        dump(deepcopy(npdata), fh, compression=9)
    with open(path, 'rb') as fh:
        data2 = load(fh, decompression=True)
    _numpy_equality(data2)
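npdata and _numpy_equality are fixtures from json_tricks' own test suite and are not shown on this page; a minimal stand-in, with assumed contents, could look like this:

import numpy as np

# hypothetical stand-ins for the json_tricks test fixtures used above
npdata = {'vector': np.arange(10), 'matrix': np.eye(3)}

def _numpy_equality(data2):
    # every array should survive the dump/load round trip unchanged
    for key, original in npdata.items():
        assert np.array_equal(data2[key], original)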
Example #3
import os

import caffe
from json_tricks.np import dump


def extract_caffe_model(model, weights, output_path):
    """extract caffe model's parameters to numpy array, and write them to files
    Args:
      model: path of '.prototxt'
      weights: path of '.caffemodel'
      output_path: output path of numpy params
    Returns:
      None
    """
    net = caffe.Net(model, weights, caffe.TEST)

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    with open(os.path.join(output_path, 'shapes.ahsf'), 'w') as shape_file:
        for item in net.params.items():
            name, layer = item
            print('convert layer: ' + name)

            num = 0
            for p in net.params[name]:
                name = name.replace('/', '\\')
                # blob 0 holds the weights; later blobs get a '_bias' suffix
                name = name if num == 0 else '{}_bias'.format(name)

                with open(os.path.join(output_path, '{}.ahsf'.format(name)),
                          'w') as outfile:
                    dump(p.data.flatten(), outfile)

                shape_file.write("{} {} ".format(name, len(p.data.shape)))
                for dim in p.data.T.shape:
                    shape_file.write(str(dim) + " ")
                shape_file.write('\n')
                num += 1
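Reading the files back just reverses the format written above; a sketch, assuming exactly the shapes.ahsf layout produced by extract_caffe_model:

import os

import numpy as np
from json_tricks.np import load


def load_extracted_params(output_path):
    """Rebuild a {name: ndarray} dict from the .ahsf files written above."""
    params = {}
    with open(os.path.join(output_path, 'shapes.ahsf')) as shape_file:
        for line in shape_file:
            fields = line.split()
            if not fields:
                continue
            name, ndim = fields[0], int(fields[1])
            # dims were taken from p.data.T, so reverse them to get the original shape
            shape = tuple(int(d) for d in fields[2:2 + ndim])[::-1]
            with open(os.path.join(output_path, name + '.ahsf')) as f:
                params[name] = np.asarray(load(f)).reshape(shape)
    return params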
Example #4
from os.path import join
from tempfile import mkdtemp

from json_tricks.np import dump, load


def test_file_numpy():
	path = join(mkdtemp(), 'pytest-np.json')
	with open(path, 'wb+') as fh:
		dump(npdata, fh, compression=9)
	with open(path, 'rb') as fh:
		data2 = load(fh, decompression=True)
	_numpy_equality(data2)
Example #5
import logging

from json_tricks import dump

log = logging.getLogger(__name__)


def save_json(obj, outfile, allow_nan=True, compression=False):
    """Save an ssbio object as a JSON file using json_tricks"""
    if compression:
        with open(outfile, 'wb') as f:
            dump(obj, f, allow_nan=allow_nan, compression=compression)
    else:
        with open(outfile, 'w') as f:
            dump(obj, f, allow_nan=allow_nan, compression=compression)
    log.info('Saved {} (id: {}) to {}'.format(type(obj), obj.id, outfile))
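A usage sketch; Protein below is a hypothetical stand-in for an ssbio object (json_tricks can encode plain class instances, and save_json only assumes an id attribute):

class Protein(object):
    """Hypothetical stand-in for an ssbio object."""
    def __init__(self, id, sequence):
        self.id = id
        self.sequence = sequence

save_json(Protein('P0001', 'MKTAYIAK'), 'protein.json')
save_json(Protein('P0001', 'MKTAYIAK'), 'protein.json.gz', compression=True)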
Example #6
import json
from collections import OrderedDict

from json_tricks.np import dump


# filename, batch_size, classes, epochs, learning_rate, decay, momentum,
# nesterov, train_datapoints, test_datapoints and ps are module-level globals
def output_stuff(model, history):

	#parameters
	params={ #dictionary
		'filename' : filename,
		'batch_size' : batch_size,
		'classes' : classes,
		'epochs' : epochs,
		'learning_rate' : learning_rate,
		'decay' : decay,
		'momentum' : momentum,
		'nesterov' : nesterov,
		'train_datapoints' : train_datapoints,
		'test_datapoints' : test_datapoints,
		}
	with open(filename+'_params.json', 'w') as fp:
		json.dump(params, fp)

	#architecture, for keras and nengo loading respectively
	json_string = model.to_json()
	with open(filename+'_model.json', 'w') as model_file:
		model_file.write(json_string)

	#To-do:make this into a loop, or figure out how to properly extract weight information
	arch_dict=OrderedDict((
		('input',{'type':'input', 'input_name':'image', 'weights':None, 'biases':None, 'activation':None}),
		('conv0', {'type':'Convolution2D', 'input_name':'input', 'weights':model.nodes['conv0'].get_weights()[0],'biases':model.nodes['conv0'].get_weights()[1],'activation':'relu', 'stride':1,'activities':None}), #get_outputs(model,'input','conv1',X_test[:test_datapoints])
		('maxpool0', {'type':'MaxPooling2D', 'input_name':'conv0', 'weights':None, 'activation':None, 'biases':None,'stride':None, 'pool_type':'max', 'pool_size': ps,'activities':None}),
		('conv1', {'type':'Convolution2D', 'input_name':'maxpool0', 'weights':model.nodes['conv1'].get_weights()[0], 'biases':model.nodes['conv1'].get_weights()[1], 'activation':'relu', 'stride':1,'activities':None}),
		('maxpool1', {'type':'MaxPooling2D', 'input_name':'conv1', 'weights':None,'activation':None, 'biases':None, 'stride':None, 'pool_type':'max', 'pool_size': ps,'activities':None}),
		('flatten', {'type':'Flatten', 'input_name':'maxpool1', 'weights':None,'activation':None, 'biases':None, 'stride':1,'activities':None}),
		('dense0', {'type':'Dense', 'input_name':'flatten', 'weights':model.nodes['dense0'].get_weights()[0], 'biases':model.nodes['dense0'].get_weights()[1],'activation':'relu','stride':1,'activities':None}),
		('dense1', {'type':'Dense', 'input_name':'dense0', 'weights':model.nodes['dense1'].get_weights()[0],'biases':model.nodes['dense1'].get_weights()[1], 'activation':'softmax', 'stride':1,'activities':None}),
		('output', {'type':'output', 'input_name':'dense1', 'weights':None, 'biases':None, 'activation':None}),
	))
	# print ([get_outputs(model,'input',the_node,X_test[:test_datapoints]).shape for the_node in model.nodes])
	# data=dumps(arch_dict)
	with open(filename+"_arch.json","w") as datafile:
		dump(arch_dict, datafile)

	#weights
	# model.save_weights(filename+'_weights.h5')

	#history
	with open(filename+"_history.txt", "w") as history_file:
		history_file.write(str(history.history))
Example #7
from os.path import join
from tempfile import mkdtemp

from numpy import array
from json_tricks import dump, load


def test_compressed_to_disk():
    arr = [array([[1.0, 2.0], [3.0, 4.0]])]
    path = join(mkdtemp(), 'pytest-np.json.gz')
    with open(path, 'wb+') as fh:
        dump(arr, fh, compression=True, properties={'ndarray_compact': True})
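Reading the compact, compressed file back works the same way as in the other tests; a minimal continuation of test_compressed_to_disk:

    # read it back and check the round trip (continuation of the test above)
    with open(path, 'rb') as fh:
        arr2 = load(fh, decompression=True)
    assert (arr2[0] == arr[0]).all()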
Example #8
import argparse
import os
import numpy as np
from PIL import Image

from json_tricks.np import dump

parser = argparse.ArgumentParser('Extract pixel and shape from an input image')
parser.add_argument(metavar='image_path', dest='image',
                    help='Path to the input image')

args = parser.parse_args()

image_path = args.image
output_path = './image'

if not os.path.exists(output_path):
    os.makedirs(output_path)

im = Image.open(image_path)
im_data = np.asarray(im)

with open(os.path.join(output_path, 'image.ahsf'), 'w') as mean_file:
    dump(im_data.flatten(), mean_file)

with open(os.path.join(output_path, 'image_shape.ahsf'), 'w') as shape_file:
    # add a leading axis for grayscale images so the shape is always 3-D
    arr = im_data[None] if im_data.ndim == 2 else im_data
    for dim in arr.T.shape:
        shape_file.write(str(dim) + ' ')

Example #9
            cv2.imshow('SURF Match + Inliers', img3)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

        #Output CSV
        if arguments.o:
            outFolder = "surf_experiments/"
            util.initWrite()
            util.writeFile(kp2, d2, img1Path, img2Path, inliers, percent,
                           len(kp2))
            util.closeWrite(outFolder, img2Path, 'surf')

    print("\n#### Ranking ####")
    rList = np.sort(resList, order='inliers')[::-1]
    for bestPair in range(10):
        print('#%d: %s -> Inliers: %d' % (bestPair + 1, rList[bestPair][1],
                                          rList[bestPair][2]))
        print('{percent:.2%}'.format(percent=rList[bestPair][3]))

    ## # Results and Experimental Values Logging # ##
    if arguments.o:
        subprocess.check_output(
            ["sed -e '!d' surf_experiments/surf*.csv >> surf_experiments/surf_"
             + img1Path[8:-4] + "_merge.csv"],
            shell=True)
        jList = rList.reshape((n, 1))
        with open('results.json', 'w') as resultFile:
            dump({'Results': jList}, resultFile)
Example #10
            if rList[i][1] in imgClassList:
                rankedClassList[idx][0] = i + 1
                rankedClassList[idx][1] = rList[i][1]
                rankedClassList[idx][2] = rList[i][2]
                rankedClassList[idx][3] = rList[i][3]

                print "%d->%s (%d inliers) " % (idx, rList[i][1], rList[i][2])
                print '{percent:.2%}'.format(percent=rList[i][3])
                idx += 1
        classRank = rankedClassList.reshape(15, 1)

        ## # Results and Experimental Values Logging # ##
        if arguments.wdata:
            try:
                cmd = "sed -e '!d' sift_experiments/sift*.csv >> sift_experiments/" + img1Path[:
                                                                                               -4] + "/data_merged.csv"
                rm = "rm -r sift_experiments/*.csv sift_experiments/*.xls"

                subprocess.check_output([cmd], shell=True)
                subprocess.check_output([rm], shell=True)
                jList = rList.reshape((n, 1))
                with open(expDir + "/" + img1Path[:-4] + '/results.json',
                          'w') as resultFile:
                    dump({'Results': jList}, resultFile)
                with open(expDir + "/" + img1Path[:-4] + '/classRank.json',
                          'w') as cRank:
                    dump({'ClassRank': classRank}, cRank)

            except (RuntimeError, TypeError, NameError):
                print("Internal Structure Error")
Example #11
import json


def to_json(features, filepath=None):
    if filepath is None:
        return json.dumps(features, indent=4)
    with open(filepath, 'w') as fp:
        json.dump(features, fp, indent=4)
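A quick usage sketch of the two code paths (the feature values are made up):

features = {'width': 640, 'height': 480}
print(to_json(features))            # no filepath: returns a JSON string
to_json(features, 'features.json')  # filepath given: writes to disk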
Example #12
import h5py
import numpy as np
from json_tricks.np import dump


# read two weight arrays out of a Keras-style HDF5 weights file
with h5py.File("model_weights.h5", "r") as hf:
	dataset_names = ('/layer_0/param_0', '/layer_0/param_1')
	weights = {name: np.array(hf.get(name)) for name in dataset_names}

# json_tricks serializes the numpy arrays directly
with open("weights.json", 'wt') as fp:
	dump(weights, fp, indent=4)

Example #13
import argparse
import os

import caffe
import numpy as np
from json_tricks.np import dump

parser = argparse.ArgumentParser(
    'Extract pixel and shape from a mean.binaryproto file')
parser.add_argument(metavar='mean_path',
                    dest='mean',
                    help='Path to the mean.binaryproto file')

args = parser.parse_args()

output_path = './mean'

if not os.path.exists(output_path):
    os.makedirs(output_path)

blob = caffe.proto.caffe_pb2.BlobProto()
with open(args.mean, 'rb') as f:
    data = f.read()
blob.ParseFromString(data)
arr = np.array(caffe.io.blobproto_to_array(blob))

# Binaryproto has shape of (1, 1, 28, 28), arr[0] to get (1, 28, 28)
out = arr[0]

with open(os.path.join(output_path, 'mean.ahsf'), 'w') as mean_file:
    dump(out.flatten(), mean_file)

with open(os.path.join(output_path, 'mean_shape.ahsf'), 'w') as shape_file:
    for dim in out.T.shape:
        shape_file.write(str(dim) + ' ')