Example #1
File: io.py  Project: feiranl/ssbio
def load_json(file, new_root_dir=None, decompression=False):
    """Load a JSON file using json_tricks"""
    if decompression:
        with open(file, 'rb') as f:
            my_object = load(f, decompression=decompression)
    else:
        with open(file, 'r') as f:
            my_object = load(f, decompression=decompression)
    if new_root_dir:
        my_object.root_dir = new_root_dir

    return my_object
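
Note: load_json above only covers the read side. A minimal save-side sketch, assuming json_tricks' dump and that compressed files are written in binary mode (save_json and its arguments are illustrative, not part of the snippet above):

from json_tricks import dump

def save_json(my_object, file, compression=False):
    # write the object so load_json(file, decompression=compression) can read it back;
    # compression may be False or a gzip level such as 9
    mode = 'wb' if compression else 'w'
    with open(file, mode) as f:
        dump(my_object, f, compression=compression)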
Example #2
def test_file_numpy():
    path = join(mkdtemp(), 'pytest-np.json')
    with open(path, 'wb+') as fh:
        dump(deepcopy(npdata), fh, compression=9)
    with open(path, 'rb') as fh:
        data2 = load(fh, decompression=True)
    _numpy_equality(data2)
Example #3
def import_keras_json(filename):

	# import architecture and weights from the Keras network
	print('Loading Model Architecture from Keras .json output...')
	with open(filename + "_arch.json") as datafile:
		arch_dict = load(datafile, preserve_order=True)
	return arch_dict
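
Note: a small sketch of what preserve_order=True does here, assuming json_tricks' load: the architecture comes back as an OrderedDict, so a later rebuild loop sees the layers in the order they were saved (the filename is illustrative):

from collections import OrderedDict
from json_tricks import load

with open("mnist_CNN_v2_arch.json") as datafile:
    arch_dict = load(datafile, preserve_order=True)
print(isinstance(arch_dict, OrderedDict))  # True: iteration order matches the saved layer order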
Example #4
def test_file_numpy():
	path = join(mkdtemp(), 'pytest-np.json')
	with open(path, 'wb+') as fh:
		dump(npdata, fh, compression=9)
	with open(path, 'rb') as fh:
		data2 = load(fh, decompression=True)
	_numpy_equality(data2)
Example #5
def load_song(path, **kwargs):
    '''
    Small helper function to load a song from a path.
    :param path: (string)  | path to load
    :param kwargs: (dict)  | keyword arguments passed through to the Slice subclass
    :return: the (y, sr) tuple returned by load
    '''
    y, sr = load(path=path, **kwargs)
    return (y, sr)
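
Note: unlike the other examples on this page, the load above returns an (audio, sample-rate) pair rather than a decoded JSON object, which matches librosa.load's interface; a usage sketch under that assumption (the filename is illustrative):

import librosa  # assumption: `load` in load_song is librosa.load

y, sr = librosa.load('song.wav', sr=22050, mono=True)  # audio time series and sample rate
print(y.shape, sr)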
Example #6
    def load_json_from_temp_folder(temp_out_folder, expected):
        # load the results from the temporary folder
        out_dict = {}
        for exp in expected:
            fpath = os.path.join(temp_out_folder, exp + '.json')
            if os.path.isfile(fpath):
                out_dict[exp] = json.load(open(fpath))
                os.unlink(fpath)  # remove file created in the temporary folder
            else:
                raise Exception(u'Missing output {0:s} file'.format(exp))

        return out_dict
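
Note: json.load(open(fpath)) leaves closing the file handle to the garbage collector; a small variation, sketch only, that closes it explicitly before the unlink:

with open(fpath) as jf:
    out_dict[exp] = json.load(jf)
os.unlink(fpath)  # remove the file created in the temporary folder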
Example #7
def main(argv):
    inputfile = ''
    outputfile = ''

    tweets = []
    labels = []
    tknzr = TweetTokenizer()

    try:
        opts, args = getopt.getopt(argv, "hi:o:", ["ifile=", "ofile="])
    except getopt.GetoptError:
        print('vectorize.py -i <inputfile.json> -o <outputfile>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('vectorize.py -i <inputfile.json> -o <outputfile>')
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg

    data = load(open(inputfile, 'r'))

    # pre-process the data: keep only entries with an accepted klass (skip 'NONE')
    for pre_data in data:
        if (pre_data['klass'] != 'NONE'):
            tweets.append(tknzr.tokenize(pre_data['text']))
            labels.append(pre_data['klass'])

    # load http://crscardellino.me/SBWCE/ trained model
    model = gensim.models.KeyedVectors.load_word2vec_format(
        'SBW-vectors-300-min5.bin', binary=True)

    shape = (len(tweets), MAX_NB_WORDS, 1)
    tweets_tensor = np.zeros(shape, dtype=np.int32)

    for i in range(len(tweets)):
        # map each word in the tweet to its index in the word2vec vocabulary
        f = -1
        for f in range(len(tweets[i])):
            word = tweets[i][f]
            if f >= MAX_NB_WORDS:
                continue
            # if the word is in the vocabulary, use its index
            if word in model.wv.vocab:
                tweets_tensor[i][f] = model.wv.index2word.index(word)
            else:
                # if it is a mention, substitute a first name, e.g. @michael123 -> 'Carlos'
                if word[0] == '@':
                    tweets_tensor[i][f] = model.wv.index2word.index(name())
                # otherwise use the unknown token
                else:
                    tweets_tensor[i][f] = model.wv.index2word.index('unk')
        # append the end-of-sentence token after the last word, if there is room
        if f + 1 < MAX_NB_WORDS:
            tweets_tensor[i][f + 1] = model.wv.index2word.index('eos')

    labels_array = np.array([label_to_value(label) for label in labels],
                            dtype=np.int32)

    print(tweets_tensor)
    print(labels_array)
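
Note: each model.wv.index2word.index(...) call above scans the whole vocabulary list; a constant-time lookup sketch, assuming gensim < 4.0 where KeyedVectors exposes a .vocab dict whose entries carry an .index attribute (word_index is a hypothetical helper, not part of the snippet above):

def word_index(kv, word, fallback='unk'):
    # O(1) dict lookup instead of list.index(); fall back to the unknown token
    entry = kv.vocab.get(word)
    if entry is None:
        entry = kv.vocab[fallback]
    return entry.index

# e.g. tweets_tensor[i][f] = word_index(model.wv, word)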
Example #8
img_x, img_y, img_z = 28, 28, 1
image_dim = (img_z, img_x, img_y)
X_train = X_train.reshape(X_train.shape[0], img_z, img_x, img_y)
X_test = X_test.reshape(X_test.shape[0], img_z, img_x, img_y)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
samples_train = X_train.shape[0]
samples_test = X_test.shape[0]

# import architecture and weights from the Keras network
print('Loading Model Architecture from Keras .json output...')
in_filename = 'mnist_CNN_v2'
with open(in_filename + "_arch.json") as datafile:
	arch_dict = load(datafile, preserve_order=True)


print('Building the Network...')
model_dict={}
conn_dict={}
presentation_time=0.05
# image=X_train[0]
# model_dict['image']=image
model = nengo.Network()
with model:

	#rebuild the keras model
	for key, value in arch_dict.items():

		if key == 'input':
Example #9
# This code is mostly by Sze Meng Tan, re-implementing the algorithms
# described in his 1986 PhD thesis "Aperture-synthesis mapping and
# parameter estimation"

import numpy

from scipy.optimize import leastsq, brent
import numpy as np
from json_tricks.np import load

from crocodile.synthesis import *

# Load kernel cache
KERNEL_CACHE = {}
with open('gridder.json', 'r') as f:
    for key, val in load(f):
        KERNEL_CACHE[tuple(key[0])] = val

def trap(vec, dx):
    # Perform trapezoidal integration
    return dx * (numpy.sum(vec) - 0.5 * (vec[0] + vec[-1]))

def func_to_min(h, x0, M, R):
    N = len(h)
    nu = (np.arange(M, dtype=float) + 0.5) / (2 * M)
    x = x0 * np.arange(N+1, dtype=float)/N
    C = calc_gridder_as_C(h, x0, nu, R)
    dnu = nu[1] - nu[0]
    dx = x[1] - x[0]
    h_ext = np.concatenate(([1.0], h))
    loss = np.zeros((len(h_ext), 2, M), dtype=float)
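
Note: the trap helper above is the composite trapezoidal rule on a uniformly spaced grid; a quick sanity-check sketch against numpy.trapz under that assumption:

import numpy as np

vec = np.sin(np.linspace(0.0, 1.0, 101))
dx = 1.0 / 100
print(trap(vec, dx), np.trapz(vec, dx=dx))  # both give the same trapezoidal estimate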
Example #10
def from_json(input_str):
    try:  # file given
        return json.load(open(input_str), preserve_order=False)
    except IOError:  # string given
        return json.loads(input_str, preserve_order=False)
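
Note: a usage sketch for from_json, assuming json here is json_tricks (the filenames and literals are illustrative); the try/except lets one argument serve as either a path or a raw JSON string:

settings = from_json('settings.json')  # parsed from the file if the path can be opened
inline = from_json('{"alpha": 0.1}')   # open() fails, so the string itself is parsed
print(settings, inline)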
Example #11
def load_music_data(attrstr):
    attrfile = IO.get_abspath_from_relpath_in_tomato(
        'music_data', attrstr + '.json')
    return json.load(open(attrfile))