def load_sick_data():
    """Attempt to load sick data from binary, otherwise fall back to txt."""
    try:
        if config.DEBUG:
            stdout.write('loading sick from archives.. ')
        sick_data = []
        for element in sPickle.s_load(open('sick.pickle')):
            sick_data.append(element)
    except IOError:
        if config.DEBUG:
            stdout.write(' error - loading from txt-files..')
        sick_data = []
        for line in open(os.path.join(config.working_path, 'SICK_all.txt')):
            if line.split()[0] != 'pair_ID':
                sick_data.append(load_sick_data_from_folder(line.split()[0]))  # Sort according to SICK_all.txt
        with open('sick.pickle', 'wb') as out_f:
            sPickle.s_dump(sick_data, out_f)
    if config.DEBUG:
        stdout.write(' done!\n')
    return sick_data
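# Equivalent idiom for the try-branch above: s_load is a generator, so the
# element-by-element append can be collapsed into one call (same file name):
sick_data = list(sPickle.s_load(open('sick.pickle')))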
def test_empty(self):
    with open(self.testfn, 'wb') as f:
        sPickle.s_dump([], f)
    with open(self.testfn, 'rb') as f:
        for elt in sPickle.s_load(f):
            self.fail('found element for stream that should be empty: ' + str(elt))
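# A natural companion check (a sketch, not from the original suite; assumes the
# same unittest-style fixture and `self.testfn`): dump a known list and assert
# that s_load yields it back unchanged.
def test_roundtrip(self):
    data = ['alpha', 'beta', 'gamma']
    with open(self.testfn, 'wb') as f:
        sPickle.s_dump(data, f)
    with open(self.testfn, 'rb') as f:
        self.assertEqual(list(sPickle.s_load(f)), data)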
def load(self, datasetName, use_sPickle=True):
    name = self.extract_last_component(datasetName)
    logger.info("Loading dataset: {}".format(name))
    if use_sPickle:
        return sPickle.s_load(open(self.dataset_location + "/" + name, "rb"))
    else:
        return pickle.load(open(self.dataset_location + "/" + name, "rb"))
def readfigurepositions(fname):
    filedir = os.getcwd() + '\\' + fname
    copydir = filedir + '-copy'
    shutil.copy(filedir, copydir)
    copyname = fname + '-copy'
    f = open(copyname)
    pos = array(list(sPickle.s_load(f)))
    f.close()
    return pos
import os

import numpy as np
import librosa
import sPickle

source_path = "/root/data/tzanetakis/ver9.0/"
dest_path = "/root/data/tzanetakis/ver9.1/"


def wave2mel(sample):
    """Convert a raw audio sample to a flattened log-amplitude mel spectrogram."""
    logam = librosa.logamplitude
    melgram = librosa.feature.melspectrogram
    longgrid = logam(melgram(y=sample, sr=22050, n_fft=1024, n_mels=128),
                     ref_power=1.0)
    return longgrid.flatten()


for root, dirs, files in os.walk(source_path):
    for name in files:
        if ".p" in name:
            arr = sPickle.s_load(open(root + '/' + name, 'rb'))
            dest = []
            for a in arr:
                dest.append(wave2mel(a))
            dest = np.asarray(dest)
            print name, dest.shape
            sPickle.s_dump(dest, open(dest_path + name, 'w'))
import sPickle

lst = range(101)
sPickle.s_dump(lst, open('lst.spkl', 'w'))

sum = 0
for element in sPickle.s_load(open('lst.spkl')):
    sum += element
print sum
print


def process_data(s):
    return len(s)

sPickle.s_dump((process_data(line.split(',')[0]) for line in open('input.csv')),
               open('lst1.spkl', 'w'))

for elt in sPickle.s_load(open('lst1.spkl')):
    print elt
print

f = open('lst2.spkl', 'w')
for line in open('input.csv'):
    sPickle.s_dump_elt(process_data(line.split(',')[0]), f)
f.close()

for elt in sPickle.s_load(open('lst2.spkl')):
    print elt
print

l = range(10)
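# Because s_dump accepts any iterable and s_load yields elements lazily, the
# two can be chained to rewrite one stream as another without ever holding the
# full dataset in memory. A minimal sketch (the output file name is hypothetical):
sPickle.s_dump((element * 2 for element in sPickle.s_load(open('lst.spkl'))),
               open('doubled.spkl', 'w'))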
def sPickleToArr(arr, fname):
    counter = 0
    for x in sPickle.s_load(open(source_path + fname)):
        arr[counter] = x
        counter += 1
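# Typical call pattern for the helper above (a sketch with hypothetical sizes
# and file name): the destination array is preallocated, so the element count
# and shape must be known up front; `source_path` is the module-level prefix
# the function reads from.
import numpy as np
mel_features = np.empty((1000, 128))
sPickleToArr(mel_features, 'features.p')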
if not ca_dict_computed:
    # compute the densities - i.e. run CA algorithm
    print 'Running CA clustering algorithm - computing density matrix...'
    ca_densities = ca.compute_density_matrix(uni_target_ips, uni_attacker_ips,
                                             binary_data, train_w_length, i)
    sPickle.s_dump(ca_densities.iteritems(),
                   open("densities" + str(i) + ".spkl", "w"))
else:
    # load the computed density matrix for the window
    print 'Loading CA density matrix from file...'
    ca_densities = dict(sPickle.s_load(open("densities" + str(i) + ".spkl")))

# compute the denominator needed for the similarities and store it in a dictionary
print 'Computing the denominator for similarities...'
sim_denom = sim.compute_denominator(train_set, uni_target_ips, train_w_length,
                                    offset, i, start_day)

# find similarities between victims
print 'Computing the similarities between victims...'
similarity = sim.compute_similarities(uni_target_ips, train_w_length,
                                      binary_data, sim_denom, i)

# compute the top k neighbors of each victim based on the similarities
print 'Computing top neighbors...'
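# The branch above streams a dict to disk as (key, value) pairs via iteritems()
# and rebuilds it with dict() on load. The same round trip in isolation
# (hypothetical file name):
d = {'a': 1, 'b': 2}
sPickle.s_dump(d.iteritems(), open('pairs.spkl', 'w'))
restored = dict(sPickle.s_load(open('pairs.spkl')))
assert restored == d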
def _load(self):
    with open(self.testfn, 'rb') as f:
        return list(sPickle.s_load(f))
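# The symmetric write-side helper would look like this (a sketch, not part of
# the original fixture; `self.testfn` is assumed as above):
def _dump(self, data):
    with open(self.testfn, 'wb') as f:
        sPickle.s_dump(data, f)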