Example #1
def _registerEnumPicklers():
    # Python 2: register a reducer with copy_reg so Boost.Python enum values can be pickled.
    from copy_reg import constructor, pickle
    def reduce_enum(e):
        enum = type(e).__name__.split('.')[-1]
        return (_tuple2enum, (enum, int(e)))
    constructor(_tuple2enum)
    pickle(openravepy_int.IkParameterizationType, reduce_enum)
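`_tuple2enum` is the module-level constructor the reducer refers to; it is not shown above, but it presumably looks the enum class up by name and maps the integer back to an enum value. A sketch under that assumption:

def _tuple2enum(enum_name, value):
    # Find the enum class on the openravepy_int module and map the int back to its enum value.
    enum = getattr(openravepy_int, enum_name)
    return enum.values[value]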
Example #2
def save_model(model, save_fp):
    if not os.path.exists(save_fp):
        print('Saving model...')
        with open(save_fp, 'wb') as f:
            pickle.dump(model, f)
    else:
        print(f'model {save_fp} already exists')
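For the round trip, a matching loader is a one-liner with the stdlib module; a minimal sketch (the load_model name is ours, not part of the snippet above):

import pickle

def load_model(save_fp):
    # Read back the object written by save_model above.
    with open(save_fp, 'rb') as f:
        return pickle.load(f)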
Example #3
def read_data(batch_size = 2000, batch_number = 0, datatype = "train", size = 512):
    files = os.listdir('cancer_data/inputs')
    trd, trl  = [], []
    count = -1 # make sure 0 indexed
    total = count_files(pattern = datatype)            
    batch_size = min(batch_size, total)

    begin_batch = (batch_number*batch_size) % total
    end_batch   = (batch_number+1) * batch_size % total
    if end_batch == 0:
        end_batch = (batch_number+1) * batch_size
    print(total, begin_batch, end_batch)
    #for f in tqdm(files):
    for f in files:
        if datatype in f:
            count += 1
            #print(count, begin_batch, end_batch)
            if begin_batch > end_batch:
                if count >= end_batch and count < begin_batch:
                    continue
            elif count not in range(begin_batch, end_batch):
                continue
            m = transform.resize(io.imread('cancer_data/inputs/' + f), (size,size,3), mode='constant')
            n = transform.resize(io.imread('cancer_data/outputs/' + f), (size,size,3), mode='constant')
            trd.append(whiten_data(m))
            # Don't whiten labels, only keep 1 layer
            trl.append(n[:,:,1])
            #print(f)
            #print(n[:,:,1])
            if f == "pos_test_000072.png":
                s = "" if size == 512 else str(size)
                pickle(trd[-1], "testImageData"+s)
                pickle(trl[-1], "testImageLabels"+s)
    return trd, trl
Example #5
def getBcodeImgs(loadPath, savePath, rng):
    c = 0
    windowSize = 100
    # TODO: determine this range automatically
    xrng = (3.5, 8.5)
    yrng = (0, 2.5 + 1)
    infpersistence = 3.5 - .1

    for i in range(rng):
        saveData = {b'x': [], b'y': [], b'id': []}
        data = unpickle(loadPath + str(i))
        X = data[b'x']
        Y = data[b'y']

        for barcodes in X:
            print(barcodes)
            c += 1
            plotPoints = []
            for barcode in barcodes:
                # print(barcode)
                if (barcode[1][0] != barcode[1][1]):
                    if (barcode[1][1] != 'inf'):
                        persistence = barcode[1][1] - barcode[1][0]
                        plotPoints.append((barcode[1][0], persistence))
                    else:
                        plotPoints.append((barcode[1][0], infpersistence))
            M = getBcodeImg(plotPoints, xrng, yrng, windowSize, windowSize)
            saveData[b'x'].append(M)
        saveData[b'y'] = Y

        pickle(saveData, savePath + str(i))
        saveData = {b'x': [], b'y': [], b'id': []}
Example #6
def getBcodeImgsSeparated(loadPath, savePath, rng):
    c = 0
    windowSize = 100
    # SCOPE 1.55
    # xrng = (3.5,8.5)
    # yrng = (0,2.5+1)
    # SCOPE 2.70
    xrng = (0, 8.5)
    yrng = (0, 2.5 + 1)
    infpersistence = 3.5 - .1

    for i in range(rng + 1):
        data = unpickle(loadPath + str(i))
        X = data[b'x']
        Y = data[b'y']

        for j, barcodes in enumerate(X):
            print(str(i) + '-' + str(j))
            c += 1
            plotPoints = []
            for barcode in barcodes:
                # print(barcode)
                if (barcode[1][0] != barcode[1][1]):
                    if (barcode[1][1] != 'inf'):
                        persistence = barcode[1][1] - barcode[1][0]
                        plotPoints.append((barcode[1][0], persistence))
                    else:
                        plotPoints.append((barcode[1][0], infpersistence))
            M = getBcodeImg(plotPoints, xrng, yrng, windowSize, windowSize)
            saveData = {}
            saveData[b'x'] = [M]
            saveData[b'y'] = Y[j]

            pickle(saveData, savePath + '/' + str(i) + '/' + str(j))
Example #7
def getBcodeRange(loadPath, toFileNum):
    maxX = 0
    minX = 100000
    maxY = 0
    minY = 100000
    xvals = []
    yvals = []

    for i in range(toFileNum + 1):
        print(i)
        data = unpickle(loadPath + str(i))
        X = data[b'x']
        for barcodes in X:
            for b in barcodes:
                xvals.append(b[1][0])
                if b[1][0] > maxX:
                    maxX = b[1][0]
                if b[1][0] < minX:
                    minX = b[1][0]

                if (b[1][1] != 'inf'):
                    per = b[1][1] - b[1][0]
                    if per > maxY:
                        maxY = per
                    if per < minY:
                        minY = per
                    yvals.append(per)
    print("x" + str(minX) + " - " + str(maxX))
    print("y" + str(minY) + " - " + str(maxY))
    save = {}
    save[b'x'] = xvals
    save[b'y'] = yvals
    pickle(save, 'xyfreq207')
Example #8
def main(num_epochs=10,percent_validation=0.05,percent_test=0.10,edge_len=33,
			num_regularization_params = 20):
	rng_state = np.random.get_state()

	if(platform.system() == 'Darwin'):
		folder = '/Users/dominicdelgado/Documents/Radiogenomics/bratsHGG/jpeg/'
	else:
		folder = '/home/ubuntu/data/jpeg/'

	# Calculate mean images
	T1_mean, T1c_mean, T2_mean, FLAIR_mean = get_all_mean_volumes(folder)

	# Generate test, training, and validation sets
	patient_list = range(NUM_PATIENTS)
	np.random.shuffle(patient_list)
	num_validation = int(np.floor(NUM_PATIENTS*percent_validation))
	num_test = int(np.floor(NUM_PATIENTS*percent_test))
	num_train = NUM_PATIENTS - num_test - num_validation

	train_set = patient_list[:num_train]
	test_set = patient_list[num_train:num_train+num_test]
	validation_set = patient_list[num_train+num_test:]

	# Try some different parameters from the range 1e-6 to 1e-2
	#l1_reg = 10**(np.random.rand(num_regularization_params)*4 - 4)
	#l2_reg = 10**(np.random.rand(num_regularization_params)*4 - 4)
	l1_reg = np.asarray([0])
	l2_reg = np.asarray([0])

	best_l1 = l1_reg[0]
	best_l2 = l2_reg[0]
	best_test_pct = 0
	best_val_pct = 0
	data_valid = False

	# Train network
	for i in range(l2_reg.shape[0]):
		val_pct, test_pct = train_net(folder = folder, train_set=train_set, 
					validation_set=validation_set, test_set=test_set, 
					num_epochs = num_epochs, l1_reg = l1_reg, l2_reg = l2_reg,
					edge_len = edge_len,
					T1_mean=T1_mean, T1c_mean=T1c_mean, T2_mean=T2_mean, 
					FLAIR_mean=FLAIR_mean)
		if (not data_valid) or (test_pct > best_test_pct):
			best_l1 = l1_reg[0]
			best_l2 = l2_reg[0]
			best_test_pct = test_pct
			best_val_pct = val_pct
			data_valid = True

	# Report results and save
	print "Achieved test error of %f with l1 = %f and l2 = %f." % (best_test_pct, best_l1, best_l2)

	with open(folder + 'rng_state.dat', 'wb') as f:
		pickle.dump(rng_state, f)

	return 0
Example #9
    def init_char_tag_encoding(self,
                               use_pretrained_embeddings=True,
                               embedding_dim=100):
        print("初始化字向量。")
        if self.mode == 'train':  #如果是训练,加载预训练好的,或者随机初始化。
            if use_pretrained_embeddings == True:
                print("读取预训练的词向量")
                embeddings = pickle.load(
                    open(run_time.PATH_PRETRAINED_EMBEDDINGS, 'rb'))
            else:
                print("随机初始化一份词向量")
                embeddings = np.float32(np.random.uniform(-0.5, 0.5, \
                                                               (len(self.char_id_map), embedding_dim)))
        else:  #如果是其他模式,加载模型自己训练得到的词向量即可
            print("加载模型自己的词向量")
            embeddings = pickle(open(run_time.PATH_EMBEDDINGS, 'rb'))
        #将初始化后的嵌入向量添加到计算图找那个
        with tf.variable_scope("words"):
            self.embeddings = tf.Variable(embeddings, dtype=tf.float32, trainable=True,\
                                           name="char_embeddings")#词向量是一个变量;当然也可以使用trainable冻结

        #分词标签的独热编码
        wordseg_tag_onehot_np = np.eye(7, dtype=np.float32)
        self.wordseg_tag_onehot = tf.Variable(wordseg_tag_onehot_np,
                                              dtype=tf.float32,
                                              trainable=False,
                                              name="wordseg_tag")

        if_no_answer_onehot_np = np.eye(2, dtype=np.float32)
        self.if_no_answer_embedding = tf.Variable(if_no_answer_onehot_np,
                                                  dtype=tf.float32,
                                                  trainable=False,
                                                  name="wordseg_tag")
Example #10
    def try_load_model(self, trained):
        if trained:
            import pickle
            with open(self.model_file, 'rb') as f:
                self.A_dic = pickle.load(f)
                self.B_dic = pickle.load(f)
                self.Pi_dic = pickle.load(f)
                self.load_para = True

        else:
            # State transition probabilities (state -> state conditional probabilities)
            self.A_dic = {}
            # Emission probabilities (state -> word conditional probabilities)
            self.B_dic = {}
            # Initial state probabilities
            self.Pi_dic = {}
            self.load_para = False
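The loader above expects three objects pickled back-to-back into a single file, so the matching writer (a sketch; the function name and argument order are assumed, not shown in the snippet) just dumps them in the same order:

import pickle

def save_model(model_file, A_dic, B_dic, Pi_dic):
    # Dump the three dicts sequentially; pickle.load() reads them back in the same order.
    with open(model_file, 'wb') as f:
        pickle.dump(A_dic, f)
        pickle.dump(B_dic, f)
        pickle.dump(Pi_dic, f)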
Example #11
def demo_client():
    in_stream = sys.stdin
    out_stream = sys.stdout
    sys.stdout = sys.stderr
    print("Hello from client!")
    while True:
        cmd = unpickle(in_stream)
        assert isinstance(cmd, tuple)
        if cmd[0] == "exit":
            pickle(out_stream, ("exit",))
            break
        elif cmd[0] == "ping":
            assert isinstance(cmd[1], numpy.ndarray)
            pickle(out_stream, ("pong", cmd[1]))
        else:
            assert False, "unknown: %r" % cmd
        del cmd
        gc.collect()
    print("Exit from client!")
Example #12
def genHoms(loadPath, savePath, toRange):

    saveData = {b'x': [], b'y': [], b'id': []}
    c = 0
    for i in range(toRange + 1):
        data = unpickle(loadPath + str(i))
        xs = data[b'x']
        ys = data[b'y']
        # ids = data[b'id']
        print(len(xs))
        for j, m in enumerate(xs):
            print(str(i) + '-' + str(j))
            barcodes, info = genHom(m, False)
            saveData[b'x'].append(barcodes)
            saveData[b'y'].append(ys[j])
            # saveData[b'id'].append(ids[j])
            c += 1
        pickle(saveData, savePath + str(i))
        saveData = {b'x': [], b'y': [], b'id': []}
Example #13
def demo():
    if pickle is pickle_shm:
        check_shmmax()
    p = subprocess.Popen([__file__] + sys.argv[1:] + ["--client"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    for i in range(LoopCount):
        m = numpy.random.randn(MatrixSize, MatrixSize)
        pickle(p.stdin, ("ping", m))
        out, m2 = unpickle(p.stdout)
        assert out == "pong"
        assert isinstance(m2, numpy.ndarray)
        assert m.shape == m2.shape
        assert numpy.isclose(m, m2).all()
        del m2
        gc.collect()
    print("Copying done, exiting.")
    pickle(p.stdin, ("exit",))
    out, = unpickle(p.stdout)
    assert out == "exit"
    p.wait()
    print("Done. Return code %i" % p.returncode)
Example #14
 def load_sklearn(self, model_file, serialization_mode):
     import pickle
     import joblib
     from azureml.designer.modelspec.sklearn import save_model
     model = None
     try:
         model = joblib.load(model_file)
     except:
         with open(model_file, 'rb') as fp:
             model = pickle.load(fp)
     save_model(model, self.out_model_path)
Example #15
 def do_write_features(self):
     if not os.path.exists(self.feature_path):
         os.makedirs(self.feature_path)
     next_data = self.get_next_batch(train=False)
     b1 = next_data[1]
     num_ftrs = self.layers[self.ftr_layer_idx]['outputs']
     while True:
         batch = next_data[1]
         data = next_data[2]
         ftrs = n.zeros((data[0].shape[1], num_ftrs), dtype=n.single)
         self.libmodel.startFeatureWriter(data + [ftrs], self.ftr_layer_idx)
         
         # load the next batch while the current one is computing
         next_data = self.get_next_batch(train=False)
         self.finish_batch()
         path_out = os.path.join(self.feature_path, 'data_batch_%d' % batch)
         pickle(path_out, {'data': ftrs, 'labels': data[1]})
         print "Wrote feature file %s" % path_out
         if next_data[1] == b1:
             break
     pickle(os.path.join(self.feature_path, 'batches.meta'), {'source_model':self.load_file,
                                                              'num_vis':num_ftrs})
Example #16
def preprocess_data(datatype='test', io_batch=2000, size=512):
    s = "" if size == 512 else str(size)
    if datatype in ['test', 'both']:
        test_data, test_labels = read_data(io_batch, 0, "test", size=size)
        shuffleDataAndLabelsInPlace(test_data, test_labels)
        pickle(test_data, "test_data" + s)
        del test_data
        pickle(test_labels, "test_labels" + s)
        del test_labels
    if datatype in ['train', 'both']:
        train_data, train_labels = read_data(io_batch, 0, "train", size=size)
        shuffleDataAndLabelsInPlace(train_data, train_labels)
        pickle(train_labels, "train_labels" + s)
        del train_labels
        pickle(train_data, "train_data" + s)
        del train_data
Example #17
def populate_db():
    for pick in tqdm(glob('dl/pickles/*.pickle')):
        for face in pickle.loads(open(pick, 'rb').read()):
            data = {}
            data['folder_name'] = face['imagePath'].split('/')[-2]
            data['image_name'] = face['imagePath'].split('/')[-1]
            data['location'] = {
                'y1': face['loc'][0],
                'x2': face['loc'][1],
                'y2': face['loc'][2],
                'x1': face['loc'][3]
            }
            data['encoding'] = pickle.dumps(face['encoding'])
            data['tagged'] = False
            db.insert_data('faces', data)
Example #18
    def delete(self, request):
        """删除购物车"""
        serializer = CartDeleteSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        sku_id = serializer.validated_data.get('sku_id')

        # Build the response object
        response = Response(status=status.HTTP_204_NO_CONTENT)

        try:
            user = request.user
        except:
            user = None
        else:
            # Logged-in user: operate on the cart data stored in redis
            # Create a redis connection object
            redis_conn = get_redis_connection('cart')
            pl = redis_conn.pipeline()
            # Remove this sku_id from the hash dict
            pl.hdel('cart_%d' % user.id, sku_id)
            # Remove this sku_id from the selected set
            pl.srem('selected_%d' % user.id, sku_id)
            pl.execute()

        if not user:
            # Anonymous user: operate on the cookie cart data
            # Get the cookie data
            cart_str = request.COOKIES.get('carts')
            # Convert cart_str into cart_dict
            if cart_str:
                cart_dict = pickle.loads(base64.b64decode(cart_str.encode()))

                # Remove the sku_id being deleted from cart_dict
                if sku_id in cart_dict:
                    del cart_dict[sku_id]

                # If this holds, the cookie cart still contains other items
                if len(cart_dict.keys()):
                    # Convert cart_dict back into cart_str
                    cart_str = base64.b64encode(pickle.dumps(cart_dict)).decode()
                    # Set the cookie
                    response.set_cookie('carts', cart_str)
                else:
                    response.delete_cookie('carts')  # All items are gone, so remove the cookie entirely

        return response
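The cookie branch packs the whole cart dict through pickle and base64 in both directions; factoring that pair of conversions into helpers (a sketch, helper names ours) makes the symmetry explicit:

import base64
import pickle

def dump_cart_cookie(cart_dict):
    # dict -> pickle bytes -> base64 bytes -> str usable as a cookie value
    return base64.b64encode(pickle.dumps(cart_dict)).decode()

def load_cart_cookie(cart_str):
    # cookie str -> base64-decoded bytes -> original dict
    return pickle.loads(base64.b64decode(cart_str.encode()))

Note that unpickling client-supplied cookie data is only safe if the cookie is signed or otherwise trusted, since pickle.loads can execute arbitrary code.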
Example #19
 def init_embeddings(self, use_pretrained_embeddings, embedding_dim):
     print("初始化词向量。")
     if self.mode == 'train':  #如果是训练,加载预训练好的,或者随机初始化。
         if use_pretrained_embeddings == True:
             print("读取预训练的词向量")
             self.embeddings = pickle.load(
                 open(run_time.PATH_PRETRAINED_EMBEDDINGS, 'rb'))
         else:
             print("随机初始化一份词向量")
             self.embeddings = np.float32(np.random.uniform(-0.25, 0.25, \
                                                            (len(self.word_id_map), embedding_dim)))
     else:  #如果是其他模式,加载模型自己训练得到的词向量即可
         print("加载模型自己的词向量")
         self.embeddings = pickle(open(run_time.PATH_EMBEDDINGS, 'rb'))
     print("词向量shape", self.embeddings)
     # 将word转换为词向量
     with tf.variable_scope("words"):
         print(self.embeddings)
         self._word_embeddings = tf.Variable(self.embeddings, dtype=tf.float32, trainable=True,\
                                        name="_word_embeddings")#词向量是一个变量;当然也可以使用trainable冻结
Example #20
parser = argparse.ArgumentParser(description="Train a maximum entropy model.")
parser.add_argument("-N",
                    "--ngram",
                    metavar="N",
                    dest="ngram",
                    type=int,
                    default=3,
                    help="The length of ngram to be considered (default 3).")
parser.add_argument("datafile",
                    type=str,
                    help="The file name containing the features.")
parser.add_argument(
    "modelfile",
    type=str,
    help="The name of the file to which you write the trained model.")

args = parser.parse_args()

print("Loading data from file {}.".format(args.datafile))
print("Training {}-gram model.".format(args.ngram))
print("Writing table to {}.".format(args.modelfile))

dataframe = read_file(args.datafile)
model = train_model(dataframe)
with open(args.modelfile, 'wb') as f:
    pickle.dump(model, f)

# YOU WILL HAVE TO FIGURE OUT SOME WAY TO INTERPRET THE FEATURES YOU CREATED.
# IT COULD INCLUDE CREATING AN EXTRA COMMAND-LINE ARGUMENT OR CLEVER COLUMN
# NAMES OR OTHER TRICKS. UP TO YOU.
Example #21
with open(tfilename, 'wt') as f:
    f.write(str(data1))
    f.write('\n')
    f.write(data2)
    f.write('\n')
    f.writelines('\n'.join(data3))

# Binary read (deserialization)
with open(bfilename, 'rb') as f:
    b = pickle.load(f)  # loads() deserializes from a bytes string instead
    print(type(b), ' Binary Read1 | ', b)
    b = pickle.load(f)
    print(type(b), ' Binary Read2 | ', b)
    b = pickle.load(f)
    print(type(b), 'Binary Read3 | ', b)
'''
pickle (binary, types preserved)
<class 'int'>  Binary Read1 |  77
<class 'str'>  Binary Read2 |  Hello, world!
<class 'list'> Binary Read3 |  ['car', 'animal', 'house']
'''
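The binary file read above must have been produced by three sequential pickle.dump calls; a sketch of the write side that is consistent with the expected output (data1, data2, and data3 match the values shown):

import pickle

data1 = 77
data2 = 'Hello, world!'
data3 = ['car', 'animal', 'house']

# Serialize the three objects back-to-back; pickle.load() then reads them in the same order.
with open(bfilename, 'wb') as f:
    pickle.dump(data1, f)
    pickle.dump(data2, f)
    pickle.dump(data3, f)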

# Text read
with open(tfilename, 'rt') as f:
    for i, line in enumerate(f, 1):
        print(type(line), 'Text Read' + str(i) + ' | ' + line, end='')
'''
text (everything is read back as str)
<class 'str'> Text Read1 | 77
<class 'str'> Text Read2 | Hello, world!
<class 'str'> Text Read3 | car
Example #22
 def _insert(txn, db, idx, primary_key, secondary_key, value):
     db.put(primary_key.encode("unicode-escape"), pickle(value), txn)
     idx.put(secondary_key.encode("unicode-escape"),
             pickle(primary_key), txn)
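In these Berkeley-DB-style snippets, pickle(value) is evidently a helper that returns the pickled bytes to store (roughly pickle.dumps). A read-side counterpart under that assumption (a sketch; _get is our name and a bsddb-style db.get API is assumed):

import pickle as _pickle

def _get(txn, db, primary_key):
    # Fetch the bytes stored by _insert and unpickle them back into the original value.
    raw = db.get(primary_key.encode("unicode-escape"), txn=txn)
    return None if raw is None else _pickle.loads(raw)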
Example #23
 def _set(self, txn, key, value):
     self._db.put(key.encode("unicode-escape"), pickle(value), txn)
Example #24
#===============================================================================
# heatmap.py
# heatmap generator given, x, y, and intensity data
# author: Taylor Stadeli
# date: Oct. 22, 2015
#
#===============================================================================
import matplotlib.pyplot as plt
import numpy as np
import random
import pickle

with open('test_vectors_2018_2_5_19_3_59.dat', 'rb') as f:
    dataArrays = pickle.load(f)



x = dataArrays[0]
y = dataArrays[1]
z = dataArrays[2]


##generate some random data
#x = []
#y = []
#z = []
#intensity = []
#for i in range (0, 2000):
#    x.append(i)
#for i in range (0, 2000):
#    y.append(i)
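The snippet stops after unpacking x, y, and z; a minimal way to render the heatmap described in the file header (a sketch, assuming x and y are coordinates and z is the intensity at each point; numpy and matplotlib are already imported above) could be:

# Bin the scattered (x, y, intensity) samples onto a 2-D grid and draw it.
heat, xedges, yedges = np.histogram2d(x, y, bins=100, weights=z)
plt.imshow(heat.T, origin='lower',
           extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]],
           aspect='auto', cmap='hot')
plt.colorbar(label='intensity')
plt.show()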
Example #25
test_labels = np.zeros((10000,))

# process train data
for i in range(1, 6):
    dict = unpickle(os.path.join(base_dir, "data_batch_{}".format(i)))
    dict_data = dict['data']
    for x, data in enumerate(dict_data):
        columns = covert_rgb(data)
        train_data[((i - 1) * 10000) + x] = columns
        print(((i - 1) * 10000) + x)

    dict_labels = dict['labels']
    for x, label in enumerate(dict_labels):
        train_labels[((i - 1) * 10000) + x] = label

pickle(os.path.join(base_dir, 'train_data'), train_data)
pickle(os.path.join(base_dir, 'train_labels'), train_labels)

#process test data
test_dict = unpickle(os.path.join(base_dir, "test_batch"))

test_temp_data = test_dict['data']
for x, data in enumerate(test_temp_data):
    columns = covert_rgb(data)
    test_data[x] = columns
    print(x)

test_labels = test_dict['labels']

pickle(os.path.join(base_dir, 'test_data'), test_data)
pickle(os.path.join(base_dir, 'test_labels'), test_labels)
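The unpickle/pickle helpers used above are not shown; for CIFAR-10-style batch files they are usually thin wrappers around the stdlib module, e.g. (a sketch, assuming Python-2-written batches where the keys load as plain strings):

import pickle as _pickle

def unpickle(path):
    # Load one batch dict from disk.
    with open(path, 'rb') as f:
        return _pickle.load(f)

def pickle(path, obj):
    # Save an object under the given path; note the (path, obj) argument order used above.
    with open(path, 'wb') as f:
        _pickle.dump(obj, f)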
Example #26
		html = response.read()
		num = ''.join([str(i) for i in html.split() if i.isdigit()])
		num_dict[num] = num_dict.get(num, "") + html
	return num_dict['']

print "*******QUESTION 4*******"
print "The picture was the key here -- if you moused over or clicked, you went to the url:"
print "http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing=12345."
print "Following it was fruitless, but there was a comment in the html file saying 400x was more than enough"
print "and urllib would be helpful.  Discovered that the responses held the key -- for the value '' message was:  "
print chainsaw()
print 
print "Trying", BASE_URL + "peak.html was the correct answer."
print
print "***********************"
print

############################################################
##  Question 5                                            ##
##  http://www.pythonchallenge.com/pc/def/equality.html   ##
############################################################
import pickle

def decode_ptext():
	# Renamed from "pickle": the original name shadowed the pickle module and broke pickle.load below.
	file = open('ptext.txt', 'r')
	decoded = pickle.load(file)
	return decoded

print decode_ptext()

Example #27
import gc
import sys

sys.path.insert(0, filepath + "HierarchicalModel/rfrf")
import hierarchicalModel_rfrf as myfunc

model = pickle.load(open("E:/HierarchicalModel_rfrf.pickle", "rb"))
gc.collect()

fromTrainData = {}
fromTrainData["CST_1"] = model.result["main"].predict(X)
gc.collect()
fromTrainData["full"] = model.predict(X)
gc.collect()


Xtest = hstack(resultTest).toarray()
gc.collect()
fromTestData = {}
fromTestData["CST_1"] = model.result["main"].predict(Xtest)
gc.collect()
fromTestData["full"] = model.predict(Xtest)
gc.collect()


RESULT = {}
RESULT["train"] = fromTrainData
RESULT["test"] = fromTestData

pickle.dump(RESULT, open("", "wb"))
Example #28
        print(arg)
a = aClass()
a.b = 2
a.aDef(5)
"""

# @-<< define s for Data2 test >>
# @+<< define s2 for Data2 test >>
# @+node:ekr.20140603074103.17644: *3* << define s2 for Data2 test >>
s2 = """
# module s2
import s
"""
# @-<< define s2 for Data2 test >>
dt = stc.Data2()
files, p0_time, root_d = pass0(files)
dt_time = pass1(files, root_d)
if "pickle" in flags:
    pickle(root_d)
if "dump_contexts_d" in flags:
    dump_contexts_d(dt)
if "dump_global_d" in flags:
    dump_global_d(dt)
if "report" in flags:
    report()
# @-others
# @@language python
# @@tabwidth -4

# @-leo
Example #29
 def pickle_to_msg(self):
     s = SerialisedMessage()
     s.msg = pickle(self)
     s.type = "zlibed_pickled_data"
     return s
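pickle(self) here is again a project helper; the "zlibed_pickled_data" type tag suggests it zlib-compresses a standard pickle. A sketch under that assumption (the helper name is hypothetical):

import pickle as _pickle
import zlib

def zlib_pickle(obj):
    # Standard pickle, then zlib compression; a receiver reverses it with
    # pickle.loads(zlib.decompress(msg)).
    return zlib.compress(_pickle.dumps(obj))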
Example #30
        print(arg)
a = aClass()
a.b = 2
a.aDef(5)
'''

#@-<< define s for Data2 test >>
#@+<< define s2 for Data2 test >>
#@+node:ekr.20140603074103.17644: *3* << define s2 for Data2 test >>
s2 = '''
# module s2
import s
'''
#@-<< define s2 for Data2 test >>
dt = stc.Data2()
files, p0_time, root_d = pass0(files)
dt_time = pass1(files, root_d)
if 'pickle' in flags:
    pickle(root_d)
if 'dump_contexts_d' in flags:
    dump_contexts_d(dt)
if 'dump_global_d' in flags:
    dump_global_d(dt)
if 'report' in flags:
    report()
#@-others
#@@language python
#@@tabwidth -4

#@-leo
Example #31
 def g(txn):
     db2.append(pickle("item_2"), txn)
     Timeout(3.0).wait()
     db1.put(b"key", b"value_2", txn)
Example #32
"""A simple key/value store API. Like shelve but uses JSON instead of
pickle (so data must be simple structures instead of arbitrary Python
objects).
"""
try:
    import ujson as json
except ImportError:
    try:
        import json
    except ImportError:
        import simplejson as json

import collections
import os
try:
    import cPickle as pickle
except ImportError:
    import pickle


class JSONShelf(collections.MutableMapping):
    # Object lifetime.

    def save(self):
        """Persist the current in-memory state of the mapping."""
        raise NotImplementedError

    def close(self):
        """Close any opened resources."""
        pass
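The docstring above describes the idea but the stub only shows the lifetime hooks; a minimal concrete, file-backed version of the same API (our own sketch, not the library's implementation; it keeps the Python-2-era collections.MutableMapping base used in the snippet) might look like:

import collections
import json
import os

class FileShelf(collections.MutableMapping):
    """Hypothetical minimal JSON-backed shelf: keys and values must be JSON-serializable."""

    def __init__(self, path):
        self.path = path
        self.data = {}
        if os.path.exists(path):
            with open(path) as f:
                self.data = json.load(f)

    def save(self):
        # Persist the in-memory mapping as JSON.
        with open(self.path, 'w') as f:
            json.dump(self.data, f)

    # The mapping protocol just delegates to the in-memory dict.
    def __getitem__(self, key):
        return self.data[key]

    def __setitem__(self, key, value):
        self.data[key] = value

    def __delitem__(self, key):
        del self.data[key]

    def __iter__(self):
        return iter(self.data)

    def __len__(self):
        return len(self.data)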
Example #33
"""A simple key/value store API. Like shelve but uses JSON instead of
pickle (so data must be simple structures instead of arbitrary Python
objects).
"""
import json
import collections
import os
import sqlite3
try:
    import cPickle as pickle
except ImportError:
    import pickle


class JSONShelf(collections.MutableMapping):
    # Object lifetime.

    def save(self):
        """Persist the current in-memory state of the mapping."""
        raise NotImplementedError

    def close(self):
        """Close any opened resources."""
        pass

    def __del__(self):
        self.close()


    # As a context manager, the shelf saves on exit.
Example #35
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Compute ICA components over set of stacked spectra, save those out, and pickle ICA model')
    parser.add_argument(
        '--pattern', type=str, default='stacked*exp??????.*', metavar='PATTERN',
        help='File pattern for stacked sky fibers.'
    )
    parser.add_argument(
        '--path', type=str, default='.', metavar='PATH',
        help='Path to work from, if not ''.'''
    )
    parser.add_argument(
        '--n_components', type=int, default=40, metavar='N_COMPONENTS',
        help='Number of ICA/PCA/etc. components'
    )
    parser.add_argument(
        '--method', type=str, default='ICA', metavar='METHOD', choices=['ICA', 'PCA', 'SPCA', 'NMF'],
        help='Which dim. reduction method to use'
    )
    parser.add_argument(
        '--ivar_cutoff', type=float, default=0.001, metavar='IVAR_CUTOFF',
        help='data with inverse variace below cutoff is masked as if ivar==0'
    )
    parser.add_argument(
        '--max_iter', type=int, default=1200, metavar='MAX_ITER',
        help='Maximum number of iterations to allow for convergence.  For SDSS data 1000 is a safe number of ICA, while SPCA requires larger values e.g. ~2000 to ~2500'
    )
    parser.add_argument(
        '--filter_split_path', type=str, default=None, metavar='FILTER_SPLIT_PATH',
        help='Path on which to find filter_split file'
    )
    parser.add_argument(
        '--filter_cutpoint', type=str, default=None, metavar='FILTER_CUTPOINT',
        help='Point at which to divide between ''normal'' flux and emission flux'
    )
    parser.add_argument(
        '--which_filter', type=str, default='both', metavar='WHICH_FILTER',
        help='Whether to use ''em''isson, ''nonem''isson, or ''both'''
    )
    args = parser.parse_args()

    comb_flux_arr, comb_exposure_arr, comb_ivar_arr, comb_masks, comb_wavelengths = \
                load_all_in_dir(args.path, use_con_flux=False, recombine_flux=False,
                                pattern=args.pattern, ivar_cutoff=args.ivar_cutoff)

    filter_split_arr = None
    if args.filter_split_path is not None:
        fstable = Table.read(args.filter_split_path, format="ascii.csv")
        filter_split_arr = fstable["flux_kurtosis_per_wl"] < args.filter_cutpoint

    mask_summed = np.sum(comb_masks, axis=0)
    min_val_ind = np.min(np.where(mask_summed == 0))
    max_val_ind = np.max(np.where(mask_summed == 0))
    print "For data set, minimum and maximum valid indecies are:", (min_val_ind, max_val_ind)

    flux_arr = comb_flux_arr
    if filter_split_arr is not None and args.which_filter != "both":
        flux_arr = np.array(comb_flux_arr, copy=True)

        if args.which_filter == "nonem":
            new_flux_arr[:,filter_split_arr] = 0
        elif args.which_filter == "em":
            new_flux_arr[:,~filter_split_arr] = 0

    sources, components, model = dim_reduce(flux_arr, args.n_components, args.method, args.max_iter, random_state)
    np.savez(data_file.format(args.method, args.which_filter), sources=sources, components=components,
                exposures=comb_exposure_arr, wavelengths=comb_wavelengths)
    pickle(model, args.path, args.method, args.which_filter)
Example #36
def _set(txn, db, key, value):
    db.put(key.encode("unicode-escape"), pickle(value), txn)
Example #37
bc = BalanceCascade(estimator=SVC(gamma='auto'),
                    random_state=100,
                    n_max_subset=5)
x_train_resam, y_train_resam = bc.fit_sample(x_train, y_train)

with open('values_undersampling.pkl', 'wb') as f:
    pickle.dump(x_train_resam, f)
    pickle.dump(y_train_resam, f)
    pickle.dump(test, f)

# In[13]:

with open('values_undersampling.pkl', 'rb') as f:
    x_train_resam = pickle.load(f)
    y_train_resam = pickle.load(f)
    test = pickle.load(f)

# ### Xgboost

# In[ ]:

import xgboost as xgb
from sklearn.model_selection import GridSearchCV
import numpy as np

param_grid = {
    'max_depth': [3, 4, 5, 8, 10],
    'n_estimators': [50, 100, 200, 400, 600, 800, 1000],
    'learning_rate': [0.1, 0.2, 0.3],
    'gamma': [0, 0.2],
    'subsample': [0.8, 1],
Example #39
 def load_model(self):
     
     model = pickle("model\\" + self.fn_model + ".pkl")
     
     return model
Example #41
import pickle


try:
    with open('man_file_pickle.txt', 'wb') as man_file, open('man_file.txt') as date:
        pickle.dump(date.readline(), man_file)
    with open('man_file_pickle.txt', 'rb') as man_file_restore:
        output = pickle.load(man_file_restore)
        print(output)
except IOError as io_err:
    print('File error.' + str(io_err))
except pickle.PickleError as pickle_err:
    print('pickle error:', pickle_err)