Example #1
    def read(self, h5_groups=None, index=None):
        """
        Read the h5 file

        We need info from the xmf file, so this file is also read if info is not given
        """
        if h5_groups is None:
            h5_groups = XdmfReader(self.filename).extract_h5_groups()

        data = {}
        with h5file(str(self.filename), 'r') as file_h5:
            for time, step in h5_groups.items():
                datat = {}
                for attrib, group in step.items():
                    npdata = np.asarray(file_h5[group])
                    if attrib == "Positions":
                        pos = npdata.reshape((-1, 3))
                        datat['Posx'] = pos[:, 0]
                        datat['Posy'] = pos[:, 1]
                        datat['Posz'] = pos[:, 2]
                    else:
                        datat[attrib] = npdata


                # datat["Aggkey"] = list(map(lambda x: hash((time, x)), datat["Label"]))
                data[time] = pd.DataFrame.from_dict(datat)

                if index is not None:
                    data[time].set_index(index, inplace=True)
        return data
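A minimal usage sketch (hypothetical: `Reader` stands for the surrounding class, constructed on the file the method reads; `index` names a column such as "Label"; numpy, pandas, and h5py's File are imported as usual):

    reader = Reader("snapshot.h5")        # hypothetical wrapper class
    frames = reader.read(index="Label")   # dict: {time: pd.DataFrame}
    t0 = sorted(frames)[0]
    print(frames[t0][["Posx", "Posy", "Posz"]].head())

Example #2

# Imports assumed by this snippet:
from os import listdir
from os.path import join
from math import ceil
import numpy as np
from h5py import File as h5file
from numpy.random import permutation
from scipy.io import loadmat
from tqdm import tqdm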
def main(source_dir, destination_name, image_shape, blocksize, initial_run):
    if initial_run:
        print('Starting')

        filenames = [
            join(source_dir, name) for name in listdir(source_dir)
            if name.endswith('.mat')
        ]

        print('Found {} files'.format(len(filenames)))

        total_pixel_count = len(filenames) * image_shape[0] * image_shape[1]
        destination_file = h5file(destination_name, mode='x', driver=None)
        dataset = destination_file.create_dataset(name='data',
                                                  shape=(total_pixel_count,
                                                         image_shape[2]),
                                                  dtype='f',
                                                  data=None,
                                                  compression='gzip',
                                                  compression_opts=6)

        # Fill the dataset.
        image_pixel_count = image_shape[0] * image_shape[1]
        for i, filename in enumerate(tqdm(permutation(filenames))):
            data = loadmat(filename)['data'].reshape(image_pixel_count,
                                                     image_shape[2])
            dataset[i * image_pixel_count:(i + 1) * image_pixel_count] = \
                permutation(data)

        # Save the dataset.
        dataset.flush()
    else:
        destination_file = h5file(destination_name, mode='r+', driver=None)
        dataset = destination_file['data']

    total_pixel_count = dataset.shape[0]

    # Shuffle the data in blocks.
    num_iterations = ceil(total_pixel_count / blocksize)

    for i in tqdm(range(num_iterations)):
        dataset[i * blocksize:(i + 1) * blocksize] = permutation(
            np.array(dataset[i * blocksize:(i + 1) * blocksize]))

    # Save all data and close files.
    dataset.flush()
    destination_file.close()
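A hypothetical invocation (all argument values are illustrative only):

    main(source_dir='./mat_files',
         destination_name='pixels.h5',
         image_shape=(214, 407, 25),
         blocksize=1024 * 1024 * 128,
         initial_run=True)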
Example #3
    def list_of_channels(self, src):
        """
        Returns all or dataset names in H5 file
        
        @param src: Source File Path.
        @type src: str
        
        @return: list
        """
        try:
            with h5file(src, 'r') as hf:
                dataset_names = list(hf.keys())

            return dataset_names

        except Exception as e:
            # Errors are only logged; the method then implicitly returns None.
            self.eetc.print_if(e)
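Usage sketch (assuming `obj` is an instance of the surrounding class):

    channels = obj.list_of_channels('measurements.h5')
    if channels is not None:
        print(channels)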
Example #4
        file_path = os.path.join(snaps_path, file)
        stf_subdir = os.path.join(catalogues_path, file[:-5])

        # Check if stf subdirectory has properties file
        has_properties = False
        if os.path.isdir(stf_subdir) and len(os.listdir(stf_subdir)) > 0:
            for filename in os.listdir(stf_subdir):
                if filename.endswith('.properties'):
                    has_properties = True
                    break

        if (os.path.isfile(file_path) and file.endswith('.hdf5')
                and not has_properties):

            if args.check_snipshots:
                with h5file(file_path, 'r') as f:
                    output_type = f['Header'].attrs['SelectOutput'].decode(
                        'ascii')
            else:
                output_type = 'Default'

            # Check if the file is snapshot (keep) or snipshot (skip)
            if output_type == 'Default':
                snapshot_files.append(file_path)
                snapshot_sizes.append(os.path.getsize(file_path))
                stf_subdirs.append(stf_subdir)
                # The 4-digit output number sits just before the ".hdf5" suffix.
                snapshot_numbers_sort.append(int(file_path[-9:-5]))

    number_snapshots = len(snapshot_files)

    # Sort snapshots by their output number
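The sorting step is cut off here. Based on the comment above, it presumably reorders the parallel lists by output number; a sketch, assuming numpy is imported as np:

    sort_key = np.argsort(snapshot_numbers_sort)
    snapshot_files = [snapshot_files[k] for k in sort_key]
    snapshot_sizes = [snapshot_sizes[k] for k in sort_key]
    stf_subdirs = [stf_subdirs[k] for k in sort_key]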
Example #5
    def data(self, src, dataset):
        # Use a context manager so the file handle is closed once the
        # dataset has been copied into memory by np.asarray.
        with h5file(src, mode="r") as the_file:
            return np.asarray(the_file[dataset])
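Usage sketch (hypothetical instance and dataset name):

    arr = obj.data('measurements.h5', 'some_dataset')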
Example #6
image_shape = (214, 407, 25)  # (254, 510, 15)
blocksize = 1024 * 1024 * 1024 // 8  # 1GB * 15 * 2 ~ 30 GB

#"""

print('Starting')

filenames = [
    join(source_dir, name) for name in listdir(source_dir)
    if name.endswith('.mat')
]

print('Found {} files'.format(len(filenames)))

total_pixel_count = len(filenames) * image_shape[0] * image_shape[1]
destination_file = h5file(destination_name, mode='x', driver=None)
dataset = destination_file.create_dataset(name='data',
                                          shape=(total_pixel_count,
                                                 image_shape[2]),
                                          dtype='f',
                                          data=None,
                                          compression='gzip',
                                          compression_opts=6)

# Fill the dataset.


image_pixel_count = image_shape[0] * image_shape[1]
for i, filename in enumerate(tqdm(permutation(filenames))):
    data = loadmat(filename)['data'].reshape(image_pixel_count, image_shape[2])
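The snippet is truncated here; the loop presumably continues as in Example #2, writing each permuted block into the dataset (and flushing once after the loop):

    dataset[i * image_pixel_count:(i + 1) * image_pixel_count] = \
        permutation(data)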
Example #7
from caproto.server import ioc_arg_parser, run
from caproto.threading import pyepics_compat as epics
from h5py import File as h5file
from satt_app import *

################################################
prefix = "AT2L0:SIM"
num_blades = 18
eV_name = "LCLS:HXR:BEAM:EV"
pmps_run_name = "PMPS:HXR:AT2L0:RUN"
pmps_tdes_name = "PMPS:HXR:AT2L0:T_DES"
abs_data = h5file('../../../absorption_data.h5', 'r')
config_data = h5file('../../../configs.h5', 'r')
################################################

ioc_args = {
    "absorption_data": abs_data,
    "config_data": config_data,
    "filter_group": [str(N + 1).zfill(2) for N in range(num_blades)],
    "eV_pv": eV_name,
    "pmps_run_pv": pmps_run_name,
    "pmps_tdes_pv": pmps_tdes_name,
}

if __name__ == '__main__':
    ioc_options, run_options = ioc_arg_parser(
        default_prefix=prefix,
        desc=IOCMain.__doc__)

    ioc = create_ioc(
        **ioc_args,
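The call is truncated here. In the standard caproto pattern the script would finish roughly like this (an assumption, not shown in the source):

    ioc = create_ioc(
        **ioc_args,
        **ioc_options)
    run(ioc.pvdb, **run_options)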
Example #8
import time
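# Imports assumed by the rest of this snippet:
import logging
import numpy as np
from h5py import File as h5file
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier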

if __name__ == '__main__':

    logging.getLogger('train.py').setLevel(logging.DEBUG)
    # logging.basicConfig(filename = os.path.join(os.getcwd(), 'wllog.txt'), level = logging.DEBUG)
    logging.basicConfig(
        level=logging.DEBUG,
        filename='new.log',
        filemode='a',
        format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'
    )
    # file_nameh5 = '/home/data_new/zhangyongqing/flx/pythoncode/SLP_ct2.h5'
    file_nameh5 = '/opt/data/private/wlin/SLP_ct2.h5'
    datasetfile = h5file(file_nameh5, 'r')
    X = np.array(datasetfile['/X_ct2'])
    Y = np.array(datasetfile['/Y_ct2'])

    X_train, X_test, y_train, y_test = train_test_split(X[:, :],
                                                        Y,
                                                        test_size=0.3,
                                                        random_state=2020)
    best_acc = 0
    best_depth = 0
    best_rc = 0
    learnrate = [0.1]
    for learn_rate in learnrate:
        for m_depth in range(33, 50, 10):
            start = time.perf_counter()  # time.clock() was removed in Python 3.8
            clf = XGBClassifier(
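The loop body is truncated at the classifier construction. A minimal sketch of how it presumably continues, reusing the loop variables above (everything else is an assumption):

            clf = XGBClassifier(max_depth=m_depth, learning_rate=learn_rate)
            clf.fit(X_train, y_train)
            acc = clf.score(X_test, y_test)
            if acc > best_acc:
                best_acc, best_depth, best_rc = acc, m_depth, learn_rate
            print('depth=%d lr=%.2f acc=%.4f (%.1fs)'
                  % (m_depth, learn_rate, acc, time.perf_counter() - start))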
Example #9
def load_h5(file_name):
    check_extension(file_name, 'h5')
    # Open read-only; the caller is responsible for closing the handle.
    return h5file(file_name, 'r')
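Usage sketch; the caller owns the returned handle and must close it:

    f = load_h5('data.h5')
    try:
        print(list(f.keys()))
    finally:
        f.close()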
Example #10
def train(LR, batchsize, epochs, list_train, list_test, knum):

    rnn = SleepMoudel.RNN2()
    # print(cnn)
    optimizer = torch.optim.Adam(
        rnn.parameters(), lr=LR,
        weight_decay=0.01)  # optimize all cnn parameters
    # optimizer1 = torch.optim.Adam(cnn.parameters(), lr=0.0001)   # optimize all cnn parameters
    # loss_func = nn.CrossEntropyLoss()
    total_loss = []
    iteration = []
    total_accuracy = []
    total_train_accuracy = []
    test_total_loss = []
    i = 0
    best_accuracy = 0.0
    last_improved = 0
    require_improvement = 2000
    flag = False
    loader = buf.pre_train(list_train, batchsize)
    x_test, y_test = buf.pre_test(list_test, batchsize)
    x_test = x_test.unsqueeze(dim=1)

    y_test = y_test.unsqueeze(1)

    for epoch in range(epochs):
        # output = np.empty(shape=[0,1000])
        # stratnum=0
        for load in range(1):
            # endnum=stratnum+10
            # loader=SleepEEG2.pre_train(list_train[stratnum:endnum],batchsize)
            for b_x, b_y in loader:
                b_x = b_x.unsqueeze(dim=1)
                b_y = b_y.unsqueeze(1)
                if len(b_x) != 1:
                    # b_x=b_x.view(6,-1,400)
                    if len(b_x) % 6 == 0:
                        b_x = b_x.view(6, -1, 912)
                    out = rnn(b_x)
                    # pred_y = torch.max(out, 1)[1].data.numpy()
                    # prediction = torch.max(F.softmax(out,dim=1), 1)[1]
                    # # print(prediction,prediction.shape)
                    # pred_y = prediction.data.numpy().squeeze()
                    # print(out.shape)
                    loss = -rnn.crf(
                        out, b_y,
                        torch.ones(
                            (out.shape[0], out.shape[1]), dtype=torch.bool))
                    # loss = loss_func(out, b_y)   # cross entropy loss

                    total_loss.append(loss.detach())  # detach so the graph can be freed
                    i = i + 1
                    iteration.append(i)
                    optimizer.zero_grad()  # clear gradients for this training step
                    # The original called loss.backward(loss), which scales the
                    # gradient by the loss value; summing first is the usual fix
                    # and also covers an unreduced (per-sequence) CRF loss.
                    loss.sum().backward()  # backpropagation, compute gradients
                    optimizer.step()
                    optimizer.step()

                    pred_y = torch.argmax(out, 2).data.numpy()
                    # print(pred_y)
                    train_accuracy = float(
                        (pred_y == b_y.data.numpy()).astype(int).sum()
                    ) / float(len(b_y))

                    # train_accuracy = float((pred_y == np.array(b_y)).astype(int).sum()) / float(len(b_y))
                    total_train_accuracy.append(train_accuracy)
                    # # output=np.concatenate((output, outrnn.data.numpy()), axis = 0)
                    if (epoch + 1) % 50 == 0:
                        # The original left this assignment commented out,
                        # so the writes below would fail with a NameError.
                        newfile = h5file(save_path + 'train_pred/result' + str(i) + '.h5', 'w')
                        newfile['/pred'] = pred_y
                        newfile['/lable'] = b_y.data.numpy()
                        newfile.close()

                    if i % 100 == 0:

                        print('Epoch: ', epoch + 1, 'Step: ', i,
                              '| train loss: %.4f' % loss.data.numpy()[-1],
                              '| train accuracy: %.2f' % train_accuracy)

        test_loss = 0
        accuracy = 0

        for testload in range(1):
            # testloader=SleepEEG2.pre_train(list_test[testload:testload+1],batchsize)
            # for testnum,(x_test,y_test) in enumerate(testloader):
            if len(x_test) != 1:
                # x_test=x_test[:18138,:,:]
                # y_test=y_test[:18138,:]

                if len(x_test) != 1:
                    x_test = x_test.view(6, -1, 912)

                    test_output = rnn(x_test)
                    # The original passed an all-zero bool mask, which masks out
                    # every position; an all-ones mask over the full output
                    # shape is presumably intended (cf. the training loop).
                    test_loss = -rnn.crf(
                        test_output, y_test,
                        torch.ones(
                            (test_output.shape[0], test_output.shape[1]),
                            dtype=torch.bool))

                    test_total_loss.append(test_loss)
                    # pred_y = torch.max(F.softmax(test_output,dim=1), 1)[1]
                    # pred_y = pred_y.data.numpy().squeeze()

                    pred_y = torch.argmax(test_output, 2).data.numpy()
                    accuracy = float(
                        (pred_y == y_test.data.numpy()).astype(int).sum()
                    ) / float(len(y_test))

                    # torch.save(pred_y,'pred_y2'+str(knum)+str(testload)+'_'+str(testnum)+'.txt')
                    # torch.save(y_test,'y_test2'+str(knum)+str(testload)+'_'+str(testnum)+'.txt')
                    # accuracy =float((pred_y == np.array(y_test)).astype(int).sum()) / float(len(y_test))
                    total_accuracy.append(accuracy)
                    print('Epoch: ', epoch + 1, '| test loss: ',
                          test_loss.data.numpy()[-1],
                          '| test accuracy: %.4f' % accuracy)
                    print(pred_y.shape)
                    if (epoch + 1) % 2 == 0:
                        newfile = h5file(
                            save_path + 'test_pred/testresult' + str(knum) +
                            '.h5', 'w')

                        newfile['/pred'] = pred_y
                        newfile['/lable'] = y_test.data.numpy()
                        newfile.close()
                    torch.cuda.empty_cache()

                    # test_loss /= len(list_test)
                    # accuracy /= len(list_test)

                    # print('Epoch: ', epoch,'| train loss: ' , test_loss, '| test accuracy: ',accuracy)
                    # total_accuracy.append(accuracy)
                    # test_total_loss.append(test_loss)

                # b_x = b_x.cpu()
                # b_y = b_y.cpu()
            torch.cuda.empty_cache()
            # stratnum=stratnum+10

        # torch.save(output, 'knum'+str(knum)+'outputdata'+str(epoch)+'.txt')
        if (epoch + 1) % 50 == 0:

            # print(total_train_accuracy,now_accuracy,now_loss)
            torch.save(rnn, save_path + 'result/gru_Epoch213' + str(knum) +
                       str(epoch) + '.pkl')  # save entire net
            torch.save(
                total_loss, save_path + 'result/gru_Epoch_total_loss213' +
                str(knum) + str(epoch) + '.txt')
            torch.save(
                test_total_loss, save_path + 'result/gru_loss_Epoch213' +
                str(knum) + str(epoch) + '.txt')
            torch.save(
                total_train_accuracy, save_path + 'result/gru_Epoch_tracc213' +
                str(knum) + str(epoch) + '.txt')
            torch.save(
                total_accuracy, save_path + 'result/gru_Epoch_teacc213' +
                str(knum) + str(epoch) + '.txt')
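A hypothetical invocation (values illustrative; the file lists are whatever buf.pre_train/buf.pre_test expect):

    train(LR=0.001, batchsize=64, epochs=200,
          list_train=train_files, list_test=test_files, knum=0)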