Example #1
    def save_trial_data_to_file(self, trial_data, experiment_data):
        # add the trial number as the first line.
        self.current_saved_file.write('Trial:{trial_number}\n'.format(trial_number=trial_data['Trial#']))

        # loop over all keys and values.
        for (key, value) in trial_data.items():
            if key != 'Trial#':
                self.current_saved_file.write('{key}:{value}\n'.format(key=key, value=value))

        # append the experiment data.
        self.current_saved_file.write(experiment_data.to_string())

        loaded_dict = dict()
        if trial_data['Trial#'] > 1:
            loaded_dict = mfp.loadmat(self.directory_path +
                                      self.inner_directory_path +
                                      self.current_saved_file_name + '.mat')

        trial_num_string = 'trial_' + str(trial_data['Trial#'])
        # delete the key containing the '#' character, because MATLAB cannot use it as a field name in its struct.
        trial_data['TrialNum'] = trial_data['Trial#']
        del trial_data['Trial#']

        loaded_dict[trial_num_string] = {'trial_data': trial_data,
                                         'experiment_data': experiment_data.to_dict()}

        mfp.savemat(self.directory_path +
                    self.inner_directory_path +
                    self.current_saved_file_name + '.mat', loaded_dict)
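
mat4py has no append mode, so the method above reloads the whole .mat file and rewrites it on every trial. A minimal self-contained sketch of that load-extend-save cycle (the file name demo_trials.mat is hypothetical):

import os

import mat4py

def append_trial(path, trial_number, trial_data):
    # loadmat raises if the file does not exist yet, so guard explicitly
    existing = mat4py.loadmat(path) if os.path.exists(path) else {}
    # MATLAB field names cannot contain '#', so keep the keys alphanumeric
    existing['trial_' + str(trial_number)] = trial_data
    mat4py.savemat(path, existing)

append_trial('demo_trials.mat', 1, {'TrialNum': 1, 'score': 0.5})
append_trial('demo_trials.mat', 2, {'TrialNum': 2, 'score': 0.7})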
Example #2
def save_mat_file(matfile_name):
    """hourize and save data to mat-file"""
    cpr = common_preparer
    data_to_load = {
        'Nodes': hourize(cpr.eq),
        'NodesPQ': hourize(cpr.pq),
        'NodesPV': hourize(cpr.pv),
        'NodesSW': hourize(cpr.sw),
        'Shunts': hourize(cpr.sh),
        'Lines': hourize(cpr.el),
        'GroupConstraints': hourize_group_constraints(cpr.cs, cpr.crs),
        'GroupConstraintsRges': hourize(cpr.crs),
        'Sections': hourize(cpr.s),
        'SectionLines': hourize(cpr.sl),
        'SectionsImpex': hourize(cpr.si),
        'SectionLinesImpex': hourize(cpr.sli),
        'SectionRegions': {'InputData': [cpr.ia]},
        'Demands': hourize(cpr.ed),
        'Supplies': hourize(cpr.es),
        'ImpexBids': hourize(cpr.eimp, mat4py.ZeroSize(5)),
        'Generators': hourize(cpr.eg),
        'PriceZoneDemands': hourize(cpr.pzd),
        'Fuel': {'InputData': [cpr.fd]},
        'GeneratorsDataLastHour': {'InputData': [cpr.lh]},
        'HourNumbers': [[i] for i in range(C.HOURCOUNT)],  # lists rather than set literals, which savemat cannot serialize
        'Settings': {'InputData': transpose(cpr.stngs)},
        'DemandResponse': {'InputData': [cpr.dr] if cpr.dr else mat4py.ZeroSize(3)},
        'PeakSo': {'InputData': [cpr.ps]},
        'PriceZoneSettings': {'InputData': [cpr.pz_dr]}
    }

    mat4py.savemat(matfile_name, {
        'HourData': data_to_load, 'Fuel': data_to_load['Fuel']})
Example #3
    def select_simulation_data(self, data, simulation):
        '''
        Extract the channels of interest from the parsed simulation output
        '''
        # self.apply_ticker += 1
        # print('Extracting data from parser ' + str(self.apply_ticker))
        selected_data = dict()

        selected_data['nodeIDs'] = data[
            'output/SpatialReport_New_Infections.bin']['nodeids'].tolist()
        for datatype in ['New_Infections', 'Prevalence', 'Population']:
            selected_data[datatype] = data['output/SpatialReport_' + datatype +
                                           '.bin']['data'].tolist()
        for name, value in data['output/InsetChart.json']['Channels'].items():
            # strip spaces, underscores, and parentheses, then truncate to 20 characters
            key = 'all' + name
            for char in ' _()':
                key = key.replace(char, '')
            selected_data[key[:20]] = value['Data']
        selected_data['age_bins'] = data[
            'output/AgeAtInfectionHistogramReport.json']['Channels'][
                'Age_Bin_Upper_Edges']['Data']
        selected_data['age_distribution'] = data[
            'output/AgeAtInfectionHistogramReport.json']['Channels'][
                'Accumulated_Binned_Infection_Counts']['Data']
        selected_data['sim_id'] = simulation.id
        selected_data['exp_id'] = simulation.experiment_id

        outdir = os.path.join(self.working_dir, 'Experiments',
                              selected_data['exp_id'])

        savemat(
            os.path.join(outdir,
                         'output_' + str(selected_data['sim_id']) + '.mat'),
            selected_data)

        return simulation.tags
Example #4
import csv
import pickle

import numpy
import mat4py as m4p

# compress() and printSummary() are project-local helpers (not shown here)


def main():
    file = 'ElQuijote.txt'
    f = open(file, 'r')
    comp = compress(f.read())
    f.close()

    print(type(comp))

    # join the binary representation of each code (note that bin() keeps its '0b' prefix)
    str1 = ''.join(bin(e) for e in comp)

    f1 = open('testCompressed.dat', 'w')  # text mode, since str1 is a str, not bytes
    f1.write(str1)
    f1.close()

    #print(comp)

    #save
    f_comp = open('co/testCompress.txt', 'wb')
    pickle.dump(comp, f_comp)
    f_comp.close()

    f_comp = open('co/testCompress.np', 'wb')
    numpy.save(f_comp, comp)
    f_comp.close()

    f_comp = csv.writer(open("co/testCompress.csv", "w"))
    f_comp.writerow(comp)

    m4p.savemat('co/testCompress.mat', {'dic': comp})

    printSummary(file, 'co/testCompress.txt')
Example #5
def save_UCI_DSADS(x_mat,
                   y_mat,
                   file_path='data/1_dataset_UCI_DSADS/Raw/',
                   val_size=0.15,
                   test_size=0.15):
    x_shape = x_mat.shape
    x_mat = x_mat.reshape((x_shape[0], x_shape[1], x_shape[2], -1))
    seg_num = x_shape[0] * x_shape[2]
    idx_vec = np.arange(seg_num)
    for s in range(8):
        np.random.shuffle(idx_vec)
        x_mat_s = x_mat[:, s, :, :].reshape((seg_num, -1))
        y_mat_s = y_mat[:, s, :].reshape(seg_num)
        x_train, x_test, y_train, y_test = train_test_split(
            x_mat_s[idx_vec, :],
            y_mat_s[idx_vec],
            test_size=(val_size + test_size))
        x_test, x_val, y_test, y_val = train_test_split(x_test,
                                                        y_test,
                                                        test_size=test_size /
                                                        (val_size + test_size))
        data = {
            'x_train': x_train.tolist(),
            'x_val': x_val.tolist(),
            'x_test': x_test.tolist(),
            'y_train': y_train.tolist(),
            'y_val': y_val.tolist(),
            'y_test': y_test.tolist()
        }
        m4p.savemat(file_path + str(s) + '.mat', data)
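
The two-stage train_test_split above first holds out val_size + test_size of the data together, then splits that pool with test_size / (val_size + test_size), which yields the intended overall fractions. A small synthetic sketch of the same arithmetic:

import numpy as np
from sklearn.model_selection import train_test_split

x = np.arange(200).reshape(100, 2)
y = np.arange(100)
val_size, test_size = 0.15, 0.15
# stage 1: hold out val + test together (30 samples here)
x_train, x_hold, y_train, y_hold = train_test_split(
    x, y, test_size=val_size + test_size)
# stage 2: split the holdout so val and test each end up at 15% overall
x_test, x_val, y_test, y_val = train_test_split(
    x_hold, y_hold, test_size=test_size / (val_size + test_size))
print(len(x_train), len(x_val), len(x_test))  # 70 15 15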
Example #6
import csv
import pickle

import numpy
import mat4py as m4p

# compress() and printSummary() are project-local helpers (not shown here)


def main():
    file = 'or/test.txt'
    f = open(file, 'r')
    comp = compress(f.read())
    f.close()

    print(comp)

    # save
    f_comp = open('co/testCompress.txt', 'wb')
    pickle.dump(comp, f_comp)
    f_comp.close()

    f_comp = open('co/testCompress.np', 'wb')
    numpy.save(f_comp, comp)
    f_comp.close()

    f_comp = csv.writer(open("co/testCompress.csv", "w"))
    f_comp.writerow(comp)

    m4p.savemat('co/testCompress.mat', {'dic': comp})

    printSummary(file, 'co/testCompress.txt')
Example #7
def main():
    #
    # get arguments and invoke the conversion routines
    #

    parser = argparse.ArgumentParser(
        description='Convert Matlab '
        'MAT-files to JSON formatted text files, and the other way around.')

    parser.add_argument('file',
                        nargs='+',
                        help='path to a Matlab MAT-file or a JSON file')
    parser.add_argument('--remove-input',
                        action='store_const',
                        const=True,
                        default=False,
                        help='remove input file after conversion')
    parser.add_argument('-f',
                        '--force',
                        action='store_const',
                        const=True,
                        default=False,
                        help='overwrite existing files when converting')
    args = parser.parse_args()

    for path in args.file:
        spl = os.path.splitext(path)
        ext = spl[1].lower()

        if ext == '.mat':
            dest = spl[0] + '.json'
            try:
                if os.path.exists(dest) and not args.force:
                    raise Exception('File {} already exists.'.format(dest))
                data = loadmat(path)
                with open(dest, 'w') as fp:
                    json.dump(data, fp)
                if args.remove_input:
                    os.remove(path)
            except Exception as e:
                print('Error: {}'.format(e))
                exit(1)

        elif ext == '.json':
            dest = spl[0] + '.mat'
            try:
                if os.path.exists(dest) and not args.force:
                    raise Exception('File {} already exists.'.format(dest))
                with open(path) as fp:
                    data = json.load(fp)
                savemat(dest, data)
                if args.remove_input:
                    os.remove(path)
            except Exception as e:  # match the .mat branch, so the exists-check above is caught too
                print('Error: {}'.format(e))
                exit(1)
        else:
            print('Unsupported file extension on file: {0}'.format(path))
            exit(1)
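
mat4py reads a .mat file into plain Python dicts and lists, which is why the tool above can hand the result straight to json.dump and feed json.load straight back to savemat. A minimal round-trip sketch of the same idea (file names are hypothetical):

import json

from mat4py import loadmat, savemat

savemat('example.mat', {'a': [1, 2, 3], 'b': {'c': 'text'}})
with open('example.json', 'w') as fp:
    json.dump(loadmat('example.mat'), fp)
with open('example.json') as fp:
    savemat('roundtrip.mat', json.load(fp))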
Example #8
 def test_save_load_mat(self):
     """Test writing mat files, and reading them again"""
     for filename, result in test_data['loadmat'].items():
         tempname = 'data/{}.temp'.format(filename)
         try:
             mat4py.savemat(tempname, result)
             data = mat4py.loadmat(tempname, meta=False)
         finally:
             os.remove(tempname)
         self.assertEqual(data, result)
Example #11
    def write_mat_file(self, data):
        path = os.path.dirname(self.file_path.text()) \
            if os.path.isdir(os.path.dirname(self.file_path.text())) else self.parent.expanduser_dir
        filename = QFileDialog.getSaveFileName(None, "Save File", path, "MatLab file (*.mat)")[0]
        if filename == '':
            self.progress.close()
            return

        savemat(filename, data)
        self.file_path.setText(filename)
        self.progress.setLabelText(_('File has been created.'))
        self.progress.setValue(100)
Example #12
File: tests.py Project: znicholls/mat4py
 def test_save_load_mat2(self):
     """Test writing mat files, and reading them again, using fileobjects"""
     for filename, result in test_data['loadmat'].items():
         with self.subTest(msg=filename):
             tempname = 'data/{}.temp'.format(filename)
             try:
                 with open(tempname, 'wb') as fileobj:
                     mat4py.savemat(fileobj, result)
                 with open(tempname, 'rb') as fileobj:
                     data = mat4py.loadmat(fileobj, meta=False)
             finally:
                 os.remove(tempname)
             self.assertEqual(data, result)
Example #13
def save_one_AB_mat(data_name, data_path, is_walking=False):
    idx_x = np.arange(0, 368)
    x_train, y_train, x_val, y_val, x_test, y_test = \
        read_csv(data_path, idx_x, X_dim=2, is_walking=is_walking)
    data = {
        'x_train': x_train.tolist(),
        'x_val': x_val.tolist(),
        'x_test': x_test.tolist(),
        'y_train': y_train.tolist(),
        'y_val': y_val.tolist(),
        'y_test': y_test.tolist()
    }
    m4p.savemat(data_name, data)
Example #14
def hmat(A, output_exec_time=False):
    import mat4py
    mat4py.savemat('temp.mat', {'adj': A.tolist()})
    cmd = ('matlab -nodesktop -r "load(\'temp.mat\'); tic; addpath(\'fh\'); '
           'h = hierarchy(adj); t = toc; save(\'temp.mat\'); quit"')
    subprocess.run(cmd, shell=True,
                   stdout=subprocess.DEVNULL)  # discard MATLAB's console output
    out = mat4py.loadmat('temp.mat')
    #print(out)
    if 't' in out:
        print('run completed. alg_runtime =', out['t'])
    if output_exec_time:
        return out['h'], out['t']
    else:
        return out['h']
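
The temp-file handoff generalizes to any external tool that reads and writes .mat files. A sketch of the bare pattern, assuming MATLAB is on the PATH (the hierarchy()/'fh' specifics above belong to hmat and are left out):

import subprocess

import mat4py

def run_matlab_on_mat(path, matlab_code):
    # the external process loads the .mat, runs the code, and rewrites the file
    cmd = 'matlab -nodesktop -r "load(\'{p}\'); {c}; save(\'{p}\'); quit"'.format(
        p=path, c=matlab_code)
    subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL)
    return mat4py.loadmat(path)

mat4py.savemat('temp.mat', {'adj': [[0, 1], [1, 0]]})
out = run_matlab_on_mat('temp.mat', 'n = size(adj, 1)')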
Example #15
def convert_txt2mat(filetxt):
    """
    Convert arcontrol_data.TXT to arcontrol_data.MAT. Just like "BF_arc2mat.m"
    :param filetxt:
    :return: None
    """
    pattern = re.compile(r'\.txt', flags=re.IGNORECASE)
    ind = pattern.findall(filetxt)
    assert len(ind)
    filemat = re.sub(pattern, '.mat', filetxt)

    # header #
    expression_header = re.compile(r'^@(IN\d+|OUT\d+|C\d+|C\d+S\d+):(.*)$')
    expression_taskname = re.compile(r'^-----(\w+)-----$')
    expression_arcbg = re.compile(r'^ArC-bg$')
    MAT = {}
    MAT['info'] = {}
    isokfile = False
    for line in open(filetxt):
        res_header = re.findall(expression_header, line)
        res_taskname = re.findall(expression_taskname, line)
        res_arcbg = re.findall(expression_arcbg, line)
        if res_header:
            style, comment = res_header[0]
            MAT['info'][style] = comment
        elif res_taskname:
            MAT['info']['task'] = res_taskname[0]
        elif res_arcbg:
            isokfile = True
            break
    assert isokfile, "It's NOT a data file from ArControl!"

    # data #
    expression = re.compile(r'^(IN\d+|OUT\d+|C\d+S\d+):(\w.*)$')
    for line in open(filetxt):
        res_expression = re.findall(expression, line)
        if res_expression:
            style, nums = res_expression[0]
            nums_list = eval('[' + nums.replace(' ', ', ') + ']')
            MAT.setdefault(style, []).append(nums_list)

    # save to file #
    sio.savemat(filemat, MAT)
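
One caveat on the snippet above: eval() executes whatever text follows the tag, so a malformed data line could run arbitrary code. A hedged alternative that parses the whitespace-separated numbers directly:

def parse_nums(nums):
    # accept both integers and floats, e.g. '12 34.5 6' -> [12, 34.5, 6]
    return [float(n) if ('.' in n or 'e' in n.lower()) else int(n)
            for n in nums.split()]

print(parse_nums('12 34.5 6'))  # [12, 34.5, 6]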
Example #16
def save_resize_idx_mat():
    data_path = 'data\\0_dataset\\AB156\\Features\\AB156_Features_300.csv'
    names = pd.read_csv(data_path, nrows=1).columns.tolist()
    leg_names = ['Ipsi ', 'Contra ']
    sensor_names = [
        'Ankle', 'TA', 'MG', 'SOL', 'Shank', 'Knee', 'BF', 'ST', 'VL', 'RF',
        'Thigh'
    ]
    EMG_names = ['TA', 'MG', 'SOL', 'BF', 'ST', 'VL', 'RF']
    IMU_names = ['Shank', 'Thigh', 'Waist']

    indices_mat = np.zeros((0, 12), dtype=int)
    for r in range(2):
        for c in range(11):
            if r == 0:
                sensor_name = sensor_names[c]
            else:
                sensor_name = sensor_names[10 - c]
            indices = np.array([
                i for i, name in enumerate(names)
                if leg_names[r] + sensor_name in name
            ]).reshape((1, -1))

            if sensor_name in EMG_names:
                indices = np.concatenate((indices, -1 * np.ones(
                    (1, 2), dtype=int)),
                                         axis=-1)
            if sensor_name in IMU_names:
                indices = indices.reshape((3, -1))
            indices_mat = np.concatenate((indices_mat, indices), axis=0)
        if r == 0:
            sensor_name = 'Waist'
            indices = np.array([
                i for i, name in enumerate(names) if sensor_name in name
            ]).reshape((1, -1))
            indices = indices.reshape((3, -1))
            indices_mat = np.concatenate((indices_mat, indices), axis=0)

    data = {
        'idx_mat': indices_mat.tolist()
    }
    m4p.savemat('data\\AB_dataset\\AB_idx.mat', data)
Example #17
File: singlerun.py Project: dak4zh/SEEC
def MainT(videochannels, tempr, filename):
    # create an array to hold each (YUV) channel of the video
    videoanalysisnr = numpy.zeros((1000, 3))
    # run NIQE command on each channel
    for n in range(0, 3):

        videoanalysisnr[0:tempr,
                        n] = skvideo.measure.niqe(videochannels[:, :, :, n])
        print(n)

    # save in a MATLAB-friendly format
    a = videoanalysisnr
    b = tempr
    c = filename

    c = c.replace('.avi', '')
    dictvideo = {
        'nr1' + c: a[:, 0].tolist(),
        'nr2' + c: a[:, 1].tolist(),
        'nr3' + c: a[:, 2].tolist(),
        'countr' + c: c
    }
    mat4py.savemat(c + '.mat', dictvideo)
Example #18
    def export_as_mat(self, path, frame_index=None):
        local_frames = []
        sck_name = os.path.basename(path)

        for frame in self.frames:
            local_frame = []
            for value in frame:
                if not self.hex_format:
                    if "float" in self.data_format:
                        value = float(value)
                    else:
                        value = int(value)
                else:
                    if "float" in self.data_format:
                        value = float.fromhex(value)
                    else:
                        value = int(value, 16)
                        if value > 0x7FFFFFFF:
                            value -= 0x100000000
                local_frame.append(value)
            local_frames.append(local_frame)

        mat4py.savemat(path + '.mat', dict({sck_name: local_frames}))
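
A condensed, self-contained sketch of the per-value decoding above: decimal strings go through int()/float(), hex strings through float.fromhex() or int(value, 16) with manual sign extension to a signed 32-bit integer.

def decode_value(value, hex_format, data_format):
    if not hex_format:
        return float(value) if 'float' in data_format else int(value)
    if 'float' in data_format:
        return float.fromhex(value)
    v = int(value, 16)
    # reinterpret the unsigned 32-bit pattern as a signed integer
    return v - 0x100000000 if v > 0x7FFFFFFF else v

print(decode_value('ffffffff', True, 'int32'))  # -1
print(decode_value('3.5', False, 'float32'))    # 3.5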
Example #20
def save_mat(data_name, is_walking=False):
    idx_x = np.arange(0, 368)
    # source domain
    data_s_path = r'0_dataset\AB156\Features\AB156_Features_300.csv'
    x_s_train, y_s_train, x_s_val, y_s_val, x_s_test, y_s_test = \
        read_csv(data_s_path, idx_x, X_dim=2, is_walking=is_walking)
    # target domain
    data_t_path = r'0_dataset\AB186\Features\AB186_Features_300.csv'
    x_t_train, y_t_train, x_t_val, y_t_val, x_t_test, y_t_test = \
        read_csv(data_t_path, idx_x, X_dim=2, is_walking=is_walking)
    data = {
        'x_s_train': x_s_train.tolist(),
        'x_s_val': x_s_val.tolist(),
        'x_s_test': x_s_test.tolist(),
        'y_s_train': y_s_train.tolist(),
        'y_s_val': y_s_val.tolist(),
        'y_s_test': y_s_test.tolist(),
        'x_t_train': x_t_train.tolist(),
        'x_t_val': x_t_val.tolist(),
        'x_t_test': x_t_test.tolist(),
        'y_t_train': y_t_train.tolist(),
        'y_t_val': y_t_val.tolist(),
        'y_t_test': y_t_test.tolist()
    }
    m4p.savemat(data_name, data)
Example #21
 def save_df_to_mat(data, out_path, the_name_you_want):
     out_path = os.path.join(out_path, '')
     df = data.apply(tuple).to_dict()
     mp.savemat(out_path + the_name_you_want, {'structs': df})
Example #22
plt.legend(['train', 'test'], loc='upper left')
plt.show()
test_acc = model.evaluate(cd, cf, batch_size=32)
print("test acc:", test_acc)
finish = 1

# while finish:
#     a = float(input('a'))
#     b = float(input('b'))
#     c = float(input('c'))
#     print(model.predict([[a,b,c]]))
#     finish = int(input("finish?"))

print(model.get_weights())
otevt = input("Save the model? y|n")
if otevt == "y":
    dataW1 = {'w1': model.layers[0].get_weights()[0].tolist()}
    # dataB1 = {'b1': model.layers[0].get_weights()[1].tolist()}
    dataW2 = {'w2': model.layers[1].get_weights()[0].tolist()}
    # dataB2 = {'b2': model.layers[1].get_weights()[1].tolist()}
    dataW3 = {'w3': model.layers[2].get_weights()[0].tolist()}
    # dataB3 = {'b3': model.layers[2].get_weights()[1].tolist()}
    m4p.savemat('w1d.mat', dataW1)
    # m4p.savemat('b1.mat', dataB1)
    m4p.savemat('w2d.mat', dataW2)
    # m4p.savemat('b2.mat', dataB2)
    m4p.savemat('w3d.mat', dataW3)
    # m4p.savemat('b3.mat', dataB3)
    model.save("my_model.h5")
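
A more compact variant of the per-layer export above, as a sketch only: it builds a tiny stand-in model (tensorflow.keras and the layer shapes are assumptions) and writes every kernel into one .mat file instead of one file per weight matrix.

import mat4py as m4p
from tensorflow import keras

model = keras.Sequential([
    keras.layers.Dense(4, input_shape=(3,)),
    keras.layers.Dense(2),
    keras.layers.Dense(1),
])
# get_weights()[0] is the kernel; index 1 would be the bias
weights = {'w{}'.format(i + 1): layer.get_weights()[0].tolist()
           for i, layer in enumerate(model.layers)}
m4p.savemat('weights.mat', weights)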

Example #23
)

#    pickle.dump(history.history, open(history_path, "wb"))
#    print('Saved model at {}'.format(model_path))
#    print('Saved model history at {}'.format(history_path))
#    dic = {}
#    for a in history.history.keys():
#        dic[a] =  np.asarray(history.history[a])
#    dic_loss = {}
#    dic_loss['losses'] = history2.losses
#    savemat(os.path.join(MODELS_PATH, 'loss-{}_'.format(ts)+'_B'+ combinations+'.mat'),dic_loss)
#
times = time_callback.times
dic_times = {}
dic_times['times'] = times
savemat(MODELS_PATH + 'times' + model_name + '.mat', dic_times)
model.save(MODELS_PATH + "model" + model_name + ".h5")

model.save_weights(MODELS_PATH + "model_weigths" + model_name + ".h5")

print(np.max(y_val))
# Load best model
#model.load_weights(MODELS_PATH + "model_weigths"+model_name+".h5")
#
##    model.evaluate(x_val, y_val, verbose=1)
## Predict on train, val and test
##    preds_train = model.predict(x_train, verbose=1)
#preds_val = model.predict(x_val, verbose=1)
#k = np.abs(y_val - preds_val)
print(np.max(y_val))
print(np.mean(y_val))
Example #24
	for i in range(estimatedTXChannelsAbs.shape[0]):
		diff = np.setdiff1d(allTxIndeces, maxk_AbsTxBins[i,:], assume_unique = True) # find indices of the smallest (nr - n_paths) components
		estimatedTXChannelsReal[i,diff] = 0 # zero out all small (nr - n_paths) values of the real TX bins
		estimatedTXChannelsImag[i,diff] = 0 # zero out all small (nr - n_paths) values of the imaginary TX bins
	del estimatedTXChannelsAbs, allTxIndeces, diff, i
	print("\bdone")
	sys.stdout.flush()
	
	# reshape decoded measurements for MATLAB processing (in the form [Q^a_1, Q^a_2, Q^a_3, ...])
	print("\t\treshaping decoded measurements ...")
	sys.stdout.flush()
	estimatedTXChannelsReal = estimatedTXChannelsReal.reshape((nRuns,nr,nt)).transpose((1,0,2)).reshape((nr,nt*nRuns))
	estimatedTXChannelsImag = estimatedTXChannelsImag.reshape((nRuns,nr,nt)).transpose((1,0,2)).reshape((nr,nt*nRuns))
	print("\bdone")
	sys.stdout.flush()
	
	# print execution time of the core decoding function (not counting load/save file execution times)
	endTime = time.time() # records the end time of decoding
	print("\t\tExecution Time = {} seconds".format(endTime - startTime))
	
	dic = {'AllQaFromPythonReal': estimatedTXChannelsReal.tolist(), 'AllQaFromPythonImag': estimatedTXChannelsImag.tolist()}

	decodeMeasurements_fname = 'decodedMeasurements_{}x{}_{}P_{}dB_SNR.mat'.format( nr, nt, n_paths, SNR_dB )
	decodeMeasurementsDir  = os.path.dirname(measurementsPath) # this is the 'temp/' directory absolute path
	decodeMeasurementsPath = ( os.path.join(decodeMeasurementsDir, decodeMeasurements_fname) )
	print("\t\tsaving decoded measurements file ...")
	sys.stdout.flush()
	savemat(decodeMeasurementsPath, dic)
	print("\bdone")
	sys.stdout.flush()
Example #25
# Get directory containing folders for each experiment
folder = sys.argv[1]
# This is the list of experiment folders
SubSubFolders = np.array(os.listdir(folder))
# Turn it into a full file path
# SubFolders = np.core.defchararray.add(DataDirectory+'\\',SubFolders) 

# Loop over each experiment folder
for file in SubSubFolders:
    # If the file is an abf file..
    ext = os.path.splitext(file)[-1].lower() 
    if ext == '.abf':
        filename = os.path.splitext(file)[0].lower() 
        savedName = folder + '\\' + filename.split('_')[-1] + '.mat'
        # And the folder doesn't already contain the respective .mat file
        if os.path.isfile(savedName):
            continue
        # Load the abf data and save the voltage, current, and epoch data
        ABFData = pyabf.ABF(folder + '\\' + file)
        V = np.zeros([ABFData.sweepCount,len(ABFData.sweepY)])
        I = np.zeros([ABFData.sweepCount,len(ABFData.sweepY)])
        Epochs = ABFData.sweepEpochs.p1s
        for i in ABFData.sweepList:
            ABFData.setSweep(i)
            V[i,:] = ABFData.sweepC
            I[i,:] = ABFData.sweepY
        V = V + ABFData.data[1,:ABFData.sweepEpochs.p2s[0]].mean()
        # Data = ABFData.data
        data = {'Voltage':V.tolist(),'Current':I.tolist(),'Epochs':Epochs}
        m4p.savemat(savedName, data)
Example #26
def save_mat(data, name):
    # saves as a MATLAB file; requires a dictionary
    from mat4py import savemat
    savemat(name, data)


# Read the NR_TR curves and put them in a single string.
ser.write(b"VPCURVE?\r\n")
Data = ser.readline().decode('utf-8').strip()
Data = Data.split(';',NR_TR)

for k in range(1, NR_TR + 1):
    Data[k] = Data[k].split(',', 1)[-1]
    # np.fromstring(..., sep=',') is deprecated; parse the values explicitly
    Vec = np.array(Data[k].split(','), dtype=int)
    if k == 1:
        Arr_y = np.array(Vec)
    if k > 1:
        Arr_y = np.vstack([Arr_y, Vec])
 
Arr_y = Yzero + Ymult * Arr_y

Data_mat = np.vstack([Arr_x, Arr_y])
Data_mat = np.transpose(Data_mat)
m4p.savemat(fname + '_persist.mat', {'Data_mat' : Data_mat.tolist()})

np.savetxt(fname + '_persist.csv',Data_mat,delimiter=',')
        
print("Dim_Arr_x = %s\n", Arr_x.shape)
print("Dim_Arr_y = %s\n", Arr_y.shape)
print("Arr_y = %s\n"%(Arr_y))
print("Arr_x = %s\n"%(Arr_x))

#print("Trace saved to %s"%fname)
ser.close()


Example #28
def make_eq_db_supplies(tsid, tdate=''):
    if tdate:
        print('making eq_db_supplies for', tdate)

    start_time = time.time()

    PMINTECHPRICE = 0
    PMINTECHINTERVAL = -20
    PMINPRICE = 0.01
    PMININTERVAL = -18
    TARIFF = 9999
    FORCEDSMOOTH = 0
    PRICEACC = 0.8
    GESSTATIONTYPE = 2
    GESINTERVAL = 0

    con = ora.OracleConnection()

    gs = GeneratorsScript()
    bs = BidsScript()
    ks = KgRgeScript()
    rgs = RastrGenScript()
    ws = WsumgenScript()

    gtps = con.exec_script(gs.get_query(), {'tsid': tsid})
    # rges = con.exec_script(rgs.get_query())
    wsumgen = con.exec_script(ws.get_query(), {'tsid': tsid})

    def index_wsumgen(row):
        return row[ws['rge_code']]

    ws_index = list(map(index_wsumgen, wsumgen))

    # def index_rges(rges_row):
    #     return rges_row[rgs['hour']], rges_row[rgs['rge_code']]
    #
    # rges_index = list(map(index_rges, rges))

    # cntr = 0
    data = []
    for g in gtps:
        # print(g[gs['dpg_code']])
        dpg_bids = []
        if g[gs['station_type']] != GESSTATIONTYPE:
            dpg_bids = sorted(con.exec_script(bs.get_query(), {
                'dpg_code': g[gs['dpg_code']],
                'tsid': tsid
            }),
                              key=itemgetter(bs['hour'],
                                             bs['interval_number']))
        # dpg_bids = sorted(
        #     [bid for bid in bids if bid[bs['dpg_code']] == d[gs['dpg_code']]]
        #     , key=itemgetter(bs['hour'], bs['interval_number']))
        rge_kg = sorted(con.exec_script(ks.get_query(), {
            'dpg_id': g[gs['gtp_id']],
            'tsid': tsid
        }),
                        key=itemgetter(ks['hour']))

        for k in rge_kg:
            # rge_data = rges[rges_index.index((k[ks['hour']], k[ks['rge_code']]))]
            pmax = k[ks['pmax']]

            # if rge_data:
            #     prev_volume = rge_data[rgs['pmin']]
            #     pmax = rge_data[rgs['pmax']]
            #     if rge_data[rgs['pmin_tech']]:
            #         cur_row = (k[ks['hour']], k[ks['node']], rge_data[rgs['pmin_tech']], rge_data[rgs['pmin_tech']],
            #                    PMINTECHPRICE, k[ks['rge_code']], PMINTECHINTERVAL, 0, TARIFF, FORCEDSMOOTH)
            #         data.append(cur_row)
            #     volume = rge_data[rgs['pmin']] - rge_data[rgs['pmin_tech']]
            #     if volume:
            #         cur_row = (k[ks['hour']], k[ks['node']], volume, volume,
            #                    PMINPRICE, k[ks['rge_code']], PMININTERVAL, 0, TARIFF, FORCEDSMOOTH)
            #         data.append(cur_row)

            prev_volume = k[ks['pmin']]
            if k[ks['rge_code']] in ws_index:
                volume = k[ks['pmin']]
            else:
                volume = max(
                    k[ks['pminagg']], k[ks['dpminso']], k[ks['pmin']]
                    if g[gs['station_type']] == GESSTATIONTYPE else 0)
            if check_number(volume):
                cur_row = (k[ks['hour']], k[ks['node']], volume, volume,
                           PMINTECHPRICE, k[ks['rge_code']], PMINTECHINTERVAL,
                           0, TARIFF, FORCEDSMOOTH)
                data.append(cur_row)

            volume = k[ks['pmin']] - volume
            if check_number(volume):
                cur_row = (k[ks['hour']], k[ks['node']], volume, volume,
                           PMINPRICE, k[ks['rge_code']], PMININTERVAL, 0,
                           TARIFF, FORCEDSMOOTH)
                data.append(cur_row)

            if k[ks['rge_code']] in ws_index:
                w = wsumgen[ws_index.index(k[ks['rge_code']])]
                if w[ws['hour_start']] <= k[ks['hour']] <= w[ws['hour_end']]:
                    integral_id = w[ws['integral_id']] if w[
                        ws['integral_id']] else 0
                    volume = pmax - prev_volume
                    if check_number(volume):
                        cur_row = (k[ks['hour']], k[ks['node']], volume, 0,
                                   w[ws['price']], k[ks['rge_code']],
                                   GESINTERVAL, integral_id, TARIFF,
                                   FORCEDSMOOTH)

                        data.append(cur_row)
            elif g[gs['station_type']] == GESSTATIONTYPE:
                volume = min(k[ks['p']] - prev_volume, pmax - prev_volume)
                if check_number(volume):
                    min_volume = volume
                    price = PMINPRICE
                    cur_row = (k[ks['hour']], k[ks['node']], volume,
                               min_volume, price, k[ks['rge_code']],
                               GESINTERVAL, 0, TARIFF, FORCEDSMOOTH)

                    data.append(cur_row)
            else:
                for bid in dpg_bids:
                    if bid[bs['hour']] == k[ks['hour']]:
                        if k[ks['pmax_dpg']] > k[ks['pmin_dpg']]:
                            if min(bid[bs['volume']],
                                   k[ks['pmax_dpg']]) <= k[ks['pmin_dpg']]:
                                k_distr = k[ks['kg_min']]
                            else:
                                k_distr = k[ks['kg_reg']]
                        else:
                            k_distr = k[ks['kg']]

                        bid_rge = (bid[bs['volume']] -
                                   k[ks['pmin_dpg']]) * k_distr + k[ks['pmin']]
                        volume = min(bid_rge - prev_volume, pmax - prev_volume)

                        if check_number(volume):
                            prev_volume = bid_rge
                            min_volume = volume if bid[
                                bs['interval_number']] < 0 else 0
                            price_acc = PMINPRICE if g[
                                gs['is_pintsch_gas']] else PRICEACC
                            price = bid[bs['price']] if bid[
                                bs['price']] > 0 else price_acc
                            cur_row = (bid[bs['hour']], k[ks['node']], volume,
                                       min_volume, price, k[ks['rge_code']],
                                       bid[bs['interval_number']], 0, TARIFF,
                                       FORCEDSMOOTH)

                            data.append(cur_row)

    data = sorted(data, key=itemgetter(1, 0, 5, 6))
    mat4py.savemat('common.mat', {'eq_db_supplies': data})

    print('---------- %s seconds -----------' % (time.time() - start_time))
Example #29
        [ttslow, g2s] = slow.normalize()
        #line.set_xdata(tt[:-2]+ttslow);
        #line.set_ydata(g2[:-2]+g2s);
        #plt.pause(0.001)
        tplot = time()

[tt, g2, nfotoni] = fast.normalize()
[ttslow, g2s] = slow.normalize()

ser.write(b'd')  # stop acquisition
ser.close()  # close the serial connection

savemat(
    'misura.mat', {
        'time': [tt[:-2] + ttslow],
        'g2': g2[:-2] + g2s,
        'ora_della_misura': starttime,
        'avg_photons_per_second': float(nfotoni) / duration
    })
savemat(
    filename, {
        'time': [tt[:-2] + ttslow],
        'g2': g2[:-2] + g2s,
        'ora_della_misura': starttime,
        'avg_photons_per_second': float(nfotoni) / duration
    })
print('')
print('Correlation function saved in ', filename)

##if save_to_bin:
##        from array import array
Example #30
        callbacks=callbacks)

    #    pickle.dump(history.history, open(history_path, "wb"))
    #    print('Saved model at {}'.format(model_path))
    #    print('Saved model history at {}'.format(history_path))
    #    dic = {}
    #    for a in history.history.keys():
    #        dic[a] =  np.asarray(history.history[a])
    #    dic_loss = {}
    #    dic_loss['losses'] = history2.losses
    #    savemat(os.path.join(MODELS_PATH, 'loss-{}_'.format(ts)+'_B'+ combinations+'.mat'),dic_loss)
    #
    times = time_callback.times
    dic_times = {}
    dic_times['times'] = times
    savemat('times.mat', dic_times)
    model.save(MODELS_PATH + "model_" + combinations + ".h5")

    model.save_weights(MODELS_PATH + "model_weigths" + combinations + ".h5")

    # Load best model
    model.load_weights(MODELS_PATH + "model_weigths" + combinations + ".h5")

    #    model.evaluate(x_val, y_val, verbose=1)
    # Predict on train, val and test
    #    preds_train = model.predict(x_train, verbose=1)
    preds_val = model.predict([
        x_val,
        np.reshape(
            y_val[:, :, :, 1],
            newshape=(y_val.shape[0], y_val.shape[1], y_val.shape[2], 1))
Example #31
File: matlab.py Project: afeldman/apu
 def write(self):
     """ write mat file """
     savemat(self._filepath, self.data)
     return self.data
Example #32
f.close()

cd = np.array(zad_float)
cf = np.array(ych_float)

log = model.fit(cd, cf, batch_size=32, epochs=10)

plt.plot(log.history['loss'])
plt.grid(True)
plt.ylabel('Loss')
plt.xlabel('Epochs')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
test_acc = model.evaluate(cd, cf, batch_size=32)
print("test acc:", test_acc)
otevt = input("Save the model? y|n")
if otevt == "y":
    dataW1 = {'w1': model.layers[0].get_weights()[0].tolist()}
    # dataB1 = {'b1': model.layers[0].get_weights()[1].tolist()}
    dataW2 = {'w2': model.layers[1].get_weights()[0].tolist()}
    # dataB2 = {'b2': model.layers[1].get_weights()[1].tolist()}
    dataW3 = {'w3': model.layers[2].get_weights()[0].tolist()}
    # dataB3 = {'b3': model.layers[2].get_weights()[1].tolist()}
    m4p.savemat('w1.mat', dataW1)
    # m4p.savemat('b1.mat', dataB1)
    m4p.savemat('w2.mat', dataW2)
    # m4p.savemat('b2.mat', dataB2)
    m4p.savemat('w3.mat', dataW3)
    # m4p.savemat('b3.mat', dataB3)
    model.save("my_model.h5")
            print(type(s_p_e))
            model.fit_generator(train_generator_2(train_val_p2, number_train,
                                                  n_batch, folder_train, size,
                                                  N),
                                validation_data=val_generator_2(
                                    train_val_p2, number_val, n_batch,
                                    folder_train, size, N),
                                validation_steps=val_pe,
                                steps_per_epoch=s_p_e,
                                epochs=n_epochs)

            times = time_callback.times
            dic_times = {}
            dic_times['times'] = times
            savemat(
                combinations + "_" + BACKBONE + '_' + name_model +
                '_times.mat', dic_times)
            model.save_weights(combinations + "_" + BACKBONE + "_" +
                               name_model + "_model_wIoU" + str(size) + ".h5")
            ############END TRAINING#############

            # Load best model
            model.load_weights(combinations + "_" + BACKBONE + "_" +
                               name_model + "_model_wIoU" + str(size) + ".h5")

            #    model.evaluate(x_val, y_val, verbose=1)
            # Predict on train, val and test
            if name_model == "PSPNet":
                preds_train = model.predict(x_train2, verbose=1)
                preds_val = model.predict(x_val2, verbose=1)
                for k in range(0, x_val.shape[0], int(x_val.shape[0] / 100)):