Code example #1
def setup_module():
    temp_dir = None
    try:
        import scipy.io
        temp_dir = tempfile.mkdtemp()
        for i in range(0, len(mat_files)):
            mat_files[i] = os.path.join(temp_dir, mat_files[i])
        scipy.io.savemat(file_name=mat_files[0], mdict=to_julia)
        hdf5storage.savemat(file_name=mat_files[1], mdict=to_julia)

        #julia_command(script_names[0], mat_files[0], mat_files[2])
        julia_command(script_names[0], mat_files[1], mat_files[3])

        #hdf5storage.loadmat(file_name=mat_files[2],
        #                    mdict=from_julia_v7_to_v7p3)
        hdf5storage.loadmat(file_name=mat_files[3],
                            mdict=from_julia_v7p3_to_v7p3)
    except:
        pass
    else:
        ran_julia_successful[0] = True
    finally:
        for name in mat_files:
            if os.path.exists(name):
                os.remove(name)
        if temp_dir is not None and os.path.exists(temp_dir):
            os.rmdir(temp_dir)
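The test fixtures above and below (Code examples #1 and #2) drive MATLAB/Julia round trips, but the hdf5storage calls themselves mirror scipy.io's interface: savemat writes a dict of variables to a v7.3 (HDF5-based) .mat file, and loadmat can fill a caller-supplied dict in place. A minimal standalone sketch of that round trip (file and variable names are illustrative, not taken from either project):

import numpy as np
import hdf5storage

# Write a dict of variables to a MATLAB v7.3 .mat file.
to_save = {'a': np.arange(10.0), 'b': np.eye(3)}
hdf5storage.savemat('roundtrip_v7p3.mat', mdict=to_save, format='7.3')

# Read it back; passing mdict makes loadmat insert the loaded variables into that dict.
loaded = {}
hdf5storage.loadmat('roundtrip_v7p3.mat', mdict=loaded)
assert np.allclose(loaded['a'], to_save['a'])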
Code example #2
def setup_module():
    teardown_module()
    matlab_command = "run('" + script_names[0] + "')"
    subprocess.check_call(['matlab', '-nosplash', '-nodesktop',
                          '-nojvm', '-r', matlab_command])
    scipy.io.loadmat(file_name=mat_files[1], mdict=types_v7)
    hdf5storage.loadmat(file_name=mat_files[0], mdict=types_v7p3)

    hdf5storage.savemat(file_name=mat_files[2], mdict=types_v7p3)
    matlab_command = "run('" + script_names[1] + "')"
    subprocess.check_call(['matlab', '-nosplash', '-nodesktop',
                          '-nojvm', '-r', matlab_command])
    scipy.io.loadmat(file_name=mat_files[3], mdict=python_v7)
    hdf5storage.loadmat(file_name=mat_files[2], mdict=python_v7p3)
Code example #3
def load_matlab(self, filename, key='results'):
    import hdf5storage
    data = hdf5storage.loadmat(filename)[key]
    expected_shape = (len(self.stimuli), 1)
    if not data.shape == expected_shape:
        raise ValueError('Wrong number of saliency maps! Expected {}, got {}'.format(expected_shape, data.shape))
    self._saliency_maps = [data[i, 0] for i in range(len(self.stimuli))]
Code example #4
    def loaddata(self, matfilename=None, picklefilename=None):
        """
        Loads data from saved files. If matfilename is not None, all numerical data is read from the file (but not
        the solver objects). If picklefilename is not None, solver objects are read from the file.
        :param matfilename:
        :param picklefilename:
        :return:
        """

        if picklefilename is not None:
            with open(picklefilename, "r") as f:
                solvers = cPickle.load(f)
                self.set_solvers(solvers)

        if matfilename is not None:
            mdict = hdf5storage.loadmat(matfilename)
            self.signaldim = mdict[u'signaldim']
            self.dictdim = mdict[u'dictdim']
            self.numdata = mdict[u'numdata']
            self.deltas = mdict[u'deltas'].copy()
            self.rhos = mdict[u'rhos'].copy()
            if mdict[u'err'] is not None:
                self.err = mdict[u'err'].copy()
            if mdict[u'ERCsuccess'] is not None:
                self.ERCsuccess = mdict[u'ERCsuccess'].copy()
            if u'simData' in mdict.keys():
                self.simData = mdict[u'simData']
Code example #5
File: __init__.py  Project: coobas/pydons
    def loadmat(cls, file_name, path='/', **kwargs):
        """Load from a Matlab (HDF5 format) file

        :param file_name: file name
        :param path: path to read data from
        :param kwargs: keyword arguments passed to hdf5storage.loadmat
        """
        return cls(hdf5storage.loadmat(file_name, marshaller_collection=cls.__mc()), **kwargs)
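A hedged usage sketch of the classmethod above: the snippet comes from pydons' __init__.py, and the class name MatStruct used below is an assumption about how pydons exports it (as is the file name).

# Hypothetical usage -- class and file names are assumptions, not verified pydons API.
import pydons
ms = pydons.MatStruct.loadmat('results_v7p3.mat')  # wraps the dict returned by hdf5storage.loadmat in the container class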
Code example #6
def load_matlab(self, filename, key='results'):
    import hdf5storage
    data = hdf5storage.loadmat(filename)[key]
    if len(data.shape) == 2 and len(self.stimuli) > 1:
        if data.shape[0] == 1:
            data = data[0]
        elif data.shape[1] == 1:
            data = data[:, 0]
        else:
            raise ValueError('Data has wrong shape: {} (need 1xN, Nx1 or N)'.format(data.shape))
    if len(data.shape) > 2:
        raise ValueError('Data has wrong shape: {} (need 1xN, Nx1 or N)'.format(data.shape))
    expected_shape = (len(self.stimuli),)
    if not data.shape == expected_shape:
        raise ValueError('Wrong number of saliency maps! Expected {}, got {}'.format(expected_shape, data.shape))
    self._saliency_maps = [data[i] for i in range(len(self.stimuli))]
Code example #7
def main(argv):
    assert len(argv) == 2, 'Usage: source_dir output_dir'
    source_dir = argv[0]
    output_dir = argv[1]

    pathes = glob.glob(source_dir + "/*.mat")

    for mat_path in pathes:
        print 'Converting {}'.format(mat_path)
        mat = hdf5storage.loadmat(mat_path)
        attrs, datasets = parse(mat)

        name = os.path.splitext(os.path.basename(mat_path))[0]
        f = h5py.File(os.path.join(output_dir, name + '.hdf5'), 'w')
        for key, val in attrs.items():
            f.attrs[key] = val
        for key, val in datasets.iteritems():
            f.create_dataset(key, data=val)
Code example #8
File: app.py  Project: rinadwih27/iforest
def forest():
    print(session['data'])
    if session['data'].split('.')[-1] == 'csv':
        data = pd.read_csv(session['data'])
    elif session['data'].split('.')[-1] == 'mat':
        mat = hd.loadmat(session['data'])
        X = pd.DataFrame(mat['X'])
        Y = pd.DataFrame(mat['y'])
        Y.rename(columns={0: 'CLASS'}, inplace=True)
        data = pd.concat([X, Y], axis=1)

    global filename
    if os.path.isfile(os.path.join('static/img/', filename)):
        os.remove(os.path.join('static/img/', filename))

    if request.method == 'GET':
        return render_template('forest.html',
                               column=data.columns,
                               data=session['data'].split('/')[-1])
    else:

        kelas = request.form['kelas']
        normal = request.form['normal']
        abnormal = request.form['abnormal']
        t_size = float(request.form['tsize']) / 100
        cont = float(request.form['cont']) / 100
        n_tree = int(request.form['tree'])
        samples = int(request.form['sample'])

        # Normal Abnormal
        dt_normal = data.loc[data[kelas] == int(normal)]
        dt_abnormal = data.loc[data[kelas] == int(abnormal)]
        # print(dt_normal)

        # Split Data
        normal_train, normal_test = train_test_split(dt_normal,
                                                     test_size=t_size,
                                                     random_state=42)
        abnormal_train, abnormal_test = train_test_split(dt_abnormal,
                                                         test_size=t_size,
                                                         random_state=42)
        train = pd.concat([normal_train, abnormal_train])
        test = pd.concat([normal_test, abnormal_test])
        # train[kelas] = train[kelas].map({0:1,1:-1})
        data[kelas] = data[kelas].map({0: 1, 1: -1})
        test[kelas] = test[kelas].map({0: 1, 1: -1})

        # Model
        model = IsolationForest(n_estimators=n_tree,
                                contamination=cont,
                                max_samples=samples,
                                random_state=100)
        pred = model.fit_predict(data.drop([kelas], axis=1))
        # print(pred)

        # # Model 2
        model2 = IsolationForest(n_estimators=n_tree,
                                 contamination=cont,
                                 max_samples=samples,
                                 random_state=100)
        model2.fit(train.drop([kelas], axis=1))
        pred2 = model2.predict(test.drop([kelas], axis=1))

        # # Hasil Prediksi
        dt = data.drop([kelas], axis=1)
        df_pred = pd.DataFrame(pred)
        df_pred.columns = [kelas]
        dt = pd.concat([dt, df_pred], axis=1)

        # Confusion Matrix
        cm = confusion_matrix(data[kelas], pred)
        df_cm = pd.DataFrame(cm, ['Anomali', 'Normal'],
                             ['Prediksi Anomali', 'Prediksi Normal'])
        plt.figure(figsize=(6, 4))
        sns.set(font_scale=1.2)
        sns.heatmap(df_cm, annot=True, annot_kws={'size': 16}, fmt='g')

        # Random filename
        filename = ''.join(
            random.choices(string.ascii_uppercase + string.digits, k=5))
        filename = filename + '.png'
        # print(filename)
        plt.savefig(os.path.join('static/img/', filename))

        plt.clf()

        # Confusion Matrix 2
        cm2 = confusion_matrix(test[kelas], pred2)
        df_cm2 = pd.DataFrame(cm2, ['Anomali', 'Normal'],
                              ['Prediksi Anomali', 'Prediksi Normal'])
        plt.figure(figsize=(6, 4))
        sns.set(font_scale=1.2)
        sns.heatmap(df_cm2, annot=True, annot_kws={'size': 16}, fmt='g')

        # Random filename
        filename2 = ''.join(
            random.choices(string.ascii_uppercase + string.digits, k=5))
        filename2 = filename2 + '.png'
        # print(filename)
        plt.savefig(os.path.join('static/imgv2/', filename2))

        # Convert to Dataframe
        df_pred = pd.DataFrame(pred)
        df_pred2 = pd.DataFrame(pred2)

        # Metrik Evaluasi 1
        f1_s = f1_score(data[kelas], df_pred, average='weighted')
        ps = precision_score(data[kelas], df_pred, average='weighted')
        rs = recall_score(data[kelas], df_pred, average='weighted')

        # Metrik Evaluasi 2
        f1_s1 = f1_score(test[kelas], df_pred2, average='weighted')
        ps1 = precision_score(test[kelas], df_pred2, average='weighted')
        rs1 = recall_score(test[kelas], df_pred2, average='weighted')

        # tp, fp, fn, tn = cm.ravel()
        # rec=tp/(tp+fn)
        rs = round(rs, 3)
        rs1 = round(rs1, 3)
        # prec=tp/(tp+fp)
        ps = round(ps, 3)
        ps1 = round(ps1, 3)
        # f1_score=2*((prec*rec)/(prec+rec))
        f1_s = round(f1_s, 3)
        f1_s1 = round(f1_s1, 3)

        filepath = 'img/' + filename
        filepath2 = 'imgv2/' + filename2

        # print(session['data'].split('.')[:-1].join())
        print(session['data'].split('.'))

        return render_template('forest.html',
                               tsize=int(t_size * 100),
                               cont=float(cont * 100),
                               tree=n_tree,
                               sample=samples,
                               dt=dt.values,
                               column=dt.columns,
                               cm=df_cm,
                               ps=ps,
                               rs=rs,
                               f1_s=f1_s,
                               ps1=ps1,
                               rs1=rs1,
                               f1_s1=f1_s1,
                               kelas=kelas,
                               filepath=filepath,
                               filepath2=filepath2,
                               data=session['data'].split('/')[-1])
Code example #9
File: data_merge.py  Project: isterev/PDOF
name_base= DATA_DIR + '/EVs/' + strategy + suffix + '/'
#names = glob.glob(name_base + "\*.mat")
data = {}

file_name = DATA_DIR + '/EVs/'+ strategy + '_' + str(n) + '.mat'
try:
    os.remove(file_name)
except:
    pass


for i in xrange(1, n + 1):
         
    name= name_base + str(i) + '.mat' 
    data = {}
    data["ev_" + str(i)] = hdf5storage.loadmat(name)
    hdf5storage.savemat(file_name, data)
    print "saved file " + str(i)
    
    #try:
    #    if(DELETE):
    #       os.remove(name)
    #except:
    #    pass
    
''' 
   name= name_base + str(i) + '.mat'     
   with h5py.File(name, 'r') as mat_file:
             
        #f.create_group(str(i))              
        h5py.h5o.copy(mat_file.id, mat_file.name, f.id, str(i))
Code example #10
# In[9]:


vr = 'R0'


# # Load the AOD files
# 

# ## Load the AOD files from 4STAR

# In[10]:


ar = hs.loadmat(fp+'/aod_ict/all_aod_KORUS_ict.mat')


# In[11]:


ar.keys()


# ## Adjust the AOD to reflect dirt contamination

# In[12]:


aod_names = sorted([a for a in ar.keys() if ('AOD' in a) and not ('UNC' in a)])
Code example #11
#import caffe
#sys.path.append('/home/tzeng/caffe_3d/python/caffe')
#foo = imp.load_source('caffe.io', '/home/tzeng/caffe_3d/python/caffe/__init__.py')
import caffe.io
from caffe.proto import caffe_pb2
print os.path.dirname(caffe_pb2.__file__)
#from caffe.proto import caffe_pb2
mat_file ='/home/tzeng/space/SegEM_project/data/train_segem.mat'
#mat_file ='/home/tzeng/caffe_flx_kernel/data/snems3d_train_RF8.mat'
#mat_file ='/home/tzeng/caffe_flx_kernel/data/snems3d_test_pad_2_47_47.mat'
#mat_file= '/home/tzeng/caffe_flx_kernel/data/snems3d_train_RF8_20Percent.mat'
#mat_file= '/home/tzeng/caffe_flx_kernel/data/snems3d_predict_norm.mat'
#snems3d_train_pad_4_47_47.mat'
#mat_file ='/home/tzeng/caffe_3d/data/snems3d_test_pad25.mat'
#mat_file ='/home/tzeng/caffe_3d/data/test'
out =hdf5storage.loadmat(mat_file,format='7.3')
#print len(out)
size = out['data'].shape;
size=size[1];
print size
k=1
#db_path_data='/home/tzeng/caffe_3d/data/mri_test_pad'
db_path_data='/home/tzeng/space/SegEM_project/data/train_segem'
#db_path_data='/home/tzeng/caffe_3d/data/snems3d_train_pad25'
#db_path_data='/home/tzeng/caffe_flx_kernel/data/snems3d_train_pad_4_47_47_rotations_hFlip'
#db_path_data='/home/tzeng/caffe_flx_kernel/data/snems3d_predict_norm'
#db_path_data='/home/tzeng/caffe_flx_kernel/data/snems3d_test_norm'
#db_path_data='/home/tzeng/caffe_flx_kernel/data/snems3d_test_pad_2_47_47_FlipRatation'
#snems3d_test_submit_pad25'
db_data_lb=leveldb.LevelDB(db_path_data, create_if_missing=True, error_if_exists=False)
batch = leveldb.WriteBatch()
Code example #12
def main():
    NUM_FEATURES = 21  # number of RGB channel features to use for classifying each pixel

    dtype = torch.float
    #device = torch.device("cpu")
    device = torch.device("cuda:0")  # Uncomment this to run on GPU

    # Load training data
    print('Load training data...')
    #    filepath = "C:/Users/CTLab/Documents/George/Arri_analysis_4-29-19/kmeans_data_5-6-19.mat"
    #    if 'mat2' in locals(): print('Yes')
    #    mat2 = hdf5storage.loadmat(filepath)
    #    x = torch.from_numpy(mat2['X_single'][:,:NUM_FEATURES]) # make 24 -> 21 features as needed
    #    y = torch.from_numpy(mat2['y_label']).long()-1 # convert to long tensor for loss function
    #    y = y.squeeze(1)
    x, y = load_trainingdata()

    m_training = x.size()[0]
    num_features = x.size()[1]
    num_classes = np.size(np.unique(y))
    classes = [
        "Artery", "Bone", "Cartilage", "Dura", "Fascia", "Fat", "Muscle",
        "Nerve", "Skin", "Parotid", "PerichondriumWCartilage", "Vein"
    ]

    #%%Load validation data
    print('Load validation data...')
    filepath_val = "C:/Users/CTLab/Documents/George/Arri_analysis_5-6-19/FreshCadaver004_20190429_data_5-6-19_pyfriendly.mat"
    mat3 = hdf5storage.loadmat(filepath_val)
    x_val = torch.from_numpy(mat3['X'][:, :NUM_FEATURES]).float(
    )  # make 24 -> 21 features as needed; convert to float tensor
    y_val = torch.from_numpy(
        mat3['y_label']).long() - 1  # convert to long tensor for loss function
    y_val = y_val.squeeze(1)

    m_validation = x_val.size()[0]

    #%% Train network
    print('Initialize model...')
    # N is batch size; D_in is input dimension;
    # H is hidden dimension; D_out is output dimension.
    D_in, H, D_out = NUM_FEATURES, 100, num_classes

    ## Create random Tensors to hold inputs and outputs
    #x = torch.randn(N, D_in)
    #y = torch.randn(N, D_out)

    # Use the nn package to define our model and loss function.
    model = torch.nn.Sequential(
        torch.nn.Linear(D_in, H),
        torch.nn.ReLU(),
        torch.nn.Linear(H, H),
        torch.nn.ReLU(),
        torch.nn.Linear(H, H),
        torch.nn.ReLU(),
        torch.nn.Linear(H, H),
        torch.nn.ReLU(),
        torch.nn.Linear(H, D_out),
        torch.nn.Softmax(
            dim=1
        )  # hidden activations are of shape m x D_out, where output features per training example are in 2nd dimension (along rows)
    )
    model = model.cuda()
    #loss_fn = torch.nn.MSELoss(reduction='sum')
    loss_fn = torch.nn.CrossEntropyLoss(
        weight=None,
        size_average=None,
        ignore_index=-100,
        reduce=None,
        reduction='mean'
    )  # input has to be a Tensor of size either (minibatch, C)(minibatch,C)

    # Use the optim package to define an Optimizer that will update the weights of
    # the model for us. Here we will use Adam; the optim package contains many other
    # optimization algorithms. The first argument to the Adam constructor tells the
    # optimizer which Tensors it should update.
    learning_rate = 1e-4
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    #for t in range(500):
    #    # Forward pass: compute predicted y by passing x to the model.
    #    y_pred = model(x)
    #
    #    # Compute and print loss.
    #    loss = loss_fn(y_pred, y)
    #    print(t, loss.item())
    #
    #    # Before the backward pass, use the optimizer object to zero all of the
    #    # gradients for the variables it will update (which are the learnable
    #    # weights of the model). This is because by default, gradients are
    #    # accumulated in buffers( i.e, not overwritten) whenever .backward()
    #    # is called. Checkout docs of torch.autograd.backward for more details.
    #    optimizer.zero_grad()
    #
    #    # Backward pass: compute gradient of the loss with respect to model
    #    # parameters
    #    loss.backward()
    #
    #    # Calling the step function on an Optimizer makes an update to its
    #    # parameters
    #    optimizer.step()

    n_epochs = 100  # or whatever
    batch_size = 128000  # or whatever

    print('Start training model...')
    cache_loss = []
    #  pushing tensors to CUDA device if available (you have to reassign them)
    x = x.to(device)
    y = y.to(device)
    #  pushing tensors to CUDA device if available (you have to reassign them)
    x_val_gpu = x_val.to(device)
    y_val_gpu = y_val.to(device)
    for epoch in range(n_epochs):
        # Store training and validation output scores to calculate accuracy of model after each epoch
        num_train_batches = int(np.ceil(m_training / batch_size))
        num_validation_batches = int(np.ceil(m_validation / batch_size))
        cache_training_acc = np.zeros(num_train_batches)  # numpy rank-1 array
        cache_validation_acc = np.zeros(
            num_validation_batches)  # numpy rank-1 array

        # x is a torch Variable
        permutation = torch.randperm(m_training)

        for i in range(0, m_training, batch_size):
            optimizer.zero_grad()

            indices = permutation[i:i + batch_size]
            batch_x, batch_y = x[indices], y[
                indices]  # includes last mini-batch even if its size < batch_size

            #            #  pushing tensors to CUDA device if available (you have to reassign them)
            #            batch_x = batch_x.to(device)
            #            batch_y = batch_y.to(device)

            # Compute and print loss.
            outputs = model(batch_x)
            loss = loss_fn(outputs, batch_y)
            #        if np.mod(i/batch_size, 50) == 0:
            #            print('Epoch:', epoch, '  Batch ', i/batch_size, 'out of', np.floor(x.size()[0]/batch_size), '  Loss:',  loss.item())

            #            # Cache outputs to calculate training accuracy at end of epoch
            pred_training_labels = scores_to_labels(outputs.detach().cpu())
            cache_training_acc[int(i / batch_size)] = accuracy(
                batch_y.cpu().numpy(), pred_training_labels.numpy())

            # After each epoch, save training and validation losses to plot learning curve
            if (m_training - i) <= batch_size:  # last mini-batch of epoch
                # empty CUDA cache every epoch
                torch.cuda.empty_cache()

                with torch.no_grad():
                    model.eval()

                    # Cycle through mini-batches of validation data to calculate mean loss
                    permutation2 = torch.randperm(m_validation)

                    val_loss = []
                    for j in range(0, m_validation, batch_size):
                        indices_val = permutation2[j:j + batch_size]
                        batch_x_val, batch_y_val = x_val_gpu[
                            indices_val], y_val_gpu[indices_val]
                        #                        batch_x_val, batch_y_val = x_val[indices_val], y_val[indices_val]

                        #                        #  pushing tensors to CUDA device if available (you have to reassign them)
                        #                        batch_x_val_gpu = batch_x_val.to(device)
                        #                        batch_y_val_gpu = batch_y_val.to(device)

                        #                        out = model(batch_x_val_gpu)
                        #                        loss_val = loss_fn(out, batch_y_val_gpu)
                        out = model(batch_x_val)
                        loss_val = loss_fn(out, batch_y_val)
                        val_loss.append(loss_val.item())

                        # Cache outputs to calculate validation accuracy at end of epoch
                        pred_validation_labels = scores_to_labels(
                            out.detach().cpu())
                        cache_validation_acc[int(j / batch_size)] = accuracy(
                            batch_y_val.cpu().numpy(),
                            pred_validation_labels.numpy())


#                        cache_pred_validation[j:j+batch_size] = scores_to_labels(out.cpu().detach().numpy())

# Calculate mean loss across validation mini-batches -- **NOTE does not take weighted mean of mini-batches' losses if last mini-batch's size is smaller
                    mean_val_loss = np.mean(val_loss)
                    acc_train = np.mean(cache_training_acc)
                    acc_val = np.mean(cache_validation_acc)

                    #                    # Calculate training and validation accuracy
                    ##                    train_pred = scores_to_labels(cache_out_training)
                    #                    acc_train = accuracy(y.numpy(), cache_pred_training)
                    #
                    ##                    val_pred = scores_to_labels(cache_out_validation)
                    #                    acc_val = accuracy(y_val.numpy(), cache_pred_validation)

                    # Print training and validation losses
                    print('Epoch:', epoch, '  Batch ',
                          i / batch_size, 'out of',
                          np.floor(m_training / batch_size), '  Loss:',
                          loss.item(), 'Val loss:', mean_val_loss,
                          '   Train acc:', acc_train, 'Val acc:', acc_val)
                    #                    print('Epoch:', epoch, '  Batch ', i/batch_size, 'out of', np.floor(m_training/batch_size), '  Loss:',  loss.item())

                    # Save losses to plot learning curve
                    cache_loss.append((epoch, loss.item(), mean_val_loss,
                                       acc_train, acc_val))

                    # reset model to training mode for next epoch
                    model.train()

            # For other mini-batches, just print training loss every nth mini-batch
            elif np.mod(i / batch_size, 50) == 0:
                print('Epoch:', epoch, '  Batch ', i / batch_size, 'out of',
                      np.floor(m_training / batch_size), '  Loss:',
                      loss.item())

            loss.backward()
            optimizer.step()

    #%% Save cached variables to analyze training / learning curve
    with open('arriANN_train_loss.pkl', 'wb') as ff:
        pickle.dump(cache_loss, ff)

    # Save trained model's parameters for inference
    PATH = "C:/Users/CTLab/Documents/George/Arri_analysis_5-6-19/arrinet_ann_model_parameters_5-8-19.pt"
    torch.save(model.state_dict(), PATH)

    print('Done')

    #%% Predict on validation set at end
    model.eval()
    y_pred = predict(model, num_classes, batch_size, device, x_val)
    y_val_true = y_val.numpy()

    print('Computing confusion matrix...')
    conf = confusion_matrix(y_val_true, y_pred)
Code example #13
                        category=DeprecationWarning)

grupos = 5
np.random.seed(32)
#n_est = 100

lista_bases = ['Lymphoma']
lista_fam = ['S', 'T', 'K', 'D']

tiempo = np.zeros(shape=(1))

for indice_lst in lista_bases:
    for indice_fam in lista_fam:

        nombre_base = indice_lst + '_Caracteristicas' + indice_fam + '.mat'
        Datos = hdf5storage.loadmat(nombre_base)

        if indice_fam == 'S':
            Caracteristicas = Datos['CaracteristicasS']
        elif indice_fam == 'T':
            Caracteristicas = Datos['CaracteristicasT']
        elif indice_fam == 'K':
            Caracteristicas = Datos['CaracteristicasK']
        else:
            Caracteristicas = Datos['CaracteristicasD']

        etiqueta = Datos['etiquetas']
        etiquetas = etiqueta.reshape((etiqueta.shape[0]))

        Resultados = np.zeros(shape=(4, 5))
        ResultadosSTD = np.zeros(shape=(4, 5))
Code example #14
    sid = int(float(sys.argv[1]))

else:  # debug in IDE
    sid = 10

tf_dir = data_dir + '/tfAnalysis/P' + str(sid) + '/'
input_dir = data_dir + 'preprocessing/P' + str(sid) + '/preprocessing2.mat'
if not os.path.exists(tf_dir):
    os.makedirs(tf_dir)

Session_num, UseChn, EmgChn, TrigChn = get_channel_setting(sid)
#original_fs=[Frequencies[i,1] for i in range(Frequencies.shape[0]) if Frequencies[i,0] == pn][0]
fs = 1000
if sid == 11 or sid == 12:
    fs = 500
mat = hdf5storage.loadmat(input_dir)
data = mat['Datacell']
channelNum = int(mat['channelNum'][0, 0])
data = np.concatenate((data[0, 0], data[0, 1]), 0)
del mat

# stim0 is trigger channel, stim1 is trigger position calculated from EMG signal.
chn_names = np.append(["seeg"] * channelNum,
                      ["emg0", "emg1", "stim_trigger", "stim_emg"])
chn_types = np.append(["seeg"] * channelNum, ["emg", "emg", "stim", "stim"])
info = mne.create_info(ch_names=list(chn_names),
                       ch_types=list(chn_types),
                       sfreq=fs)
raw = mne.io.RawArray(data.transpose(), info)

# gesture/events type: 1,2,3,4,5
Code example #15
File: test_video.py  Project: mkabra/poseTF
##
if False:
    mov = '/groups/branson/bransonlab/al/C001H001S0007_c.avi'
    win_mat = '/groups/branson/bransonlab/al/C001H001S0007_c.avi.readFrames_1450_allenwin_16b.mat'

    py_mat = '/groups/branson/home/kabram/temp/C001H001S0007_c.avi.readFrames_1450_kabram_py.mat'
    H = sio.loadmat(win_mat)

    nfr = H['I'].shape[0]
    ims = np.zeros([512,768,nfr])
    for ndx in range(nfr):
        ims[:,:,ndx] = H['I'][ndx][0]

    import os
    if os.path.exists(py_mat):
        P = hdf5storage.loadmat(py_mat)
        py_ims = P['I']
    else:
        py_ims = None
    ## random access

    cap = movies.Movie(mov)
    dd = []
    for ndx in range(10):
        curfr = np.random.randint(nfr)
        curim = cap.get_frame(curfr)[0][:,:,0]
        ff = curim.astype('float64')+1-ims[:,:,curfr]
        cur_i = [curfr, np.abs(ff).max()]
        if py_ims is not None:
            ffp = curim.astype('float64')-py_ims[:,:,curfr]
            cur_i.append(np.abs(ffp).max())
Code example #16
fp ='C:/Users/sleblan2/Research/KORUS-AQ/'


# In[7]:

vr = 'R0'


# # Load files
# 

# ## Load the AOD files from 4STAR

# In[8]:

ar = hs.loadmat(fp+'/aod_ict/all_aod_KORUS_ict.mat')


# In[9]:

ar.keys()


# ## Adjust the AOD to reflect dirt contamination

# In[10]:

arc = {}
arc['AOD0501'] = ar['AOD0501']-ar['UNCAOD0501']

Code example #17
                    action='store_true')


# In[ ]:


in_ = vars(parser.parse_args())
do_read = in_.get('doread',False)


# # Load the saved files

# In[5]:


ar = hs.loadmat(matfile)


# In[6]:


ar.keys()


# In[12]:


dds = ['20160827','20160830','20160831','20160902','20160904','20160906','20160908',
       '20160910','20160912','20160914','20160918','20160920','20160924','20160925','20160927']

Code example #18
# In[2]:

hs.__version__


# In[3]:

from mpl_toolkits.basemap import Basemap,cm


# In[4]:

get_ipython().magic(u'matplotlib notebook')


# In[6]:

fp_lut_mat = 'C:\\Users\\sleblan2\\Research\\NAAMES\\lut\\v2_NAAMES_lut.mat'
print('Loading the lut file:{}'.format(fp_lut_mat))
if not os.path.isfile(fp_lut_mat):
    print('File {} does not exist'.format(fp_lut_mat))
    raise IOError('LUT File not found: {}'.format(fp_lut_mat))
luts = hs.loadmat(fp_lut_mat)


# In[ ]:



Code example #19
# In[53]:

hs.savemat(fp+'/aod_ict/{vv}/all_aod_ict_{vv}.mat'.format(vv=vv),ar)


# ## Optionally load the file

# In[1]:

import hdf5storage as hs


# In[8]:

ar = hs.loadmat(fp+'/aod_ict/all_aod_ict.mat')


# ## Plot a histogram of all the AOD

# In[29]:

from plotting_utils import prelim


# In[30]:

plt.figure()
plt.hist(ar['AOD0501'][ar['fl']],bins=30,range=(0,1.0),alpha=0.5,normed=False,edgecolor='None',color='g',label='600-1800 m')
plt.hist(ar['AOD0501'][ar['fl1']],bins=30,range=(0,1.0),alpha=0.5,normed=False,edgecolor='None',color='b',label='below 600 m')
#plt.hist(ar['AOD0501'][ar['fl2']],bins=30,range=(0,1.0),alpha=0.5,normed=False,edgecolor='None',color='r',label='above 1800 m')
Code example #20
def main():
    Classifier_path = '/Users/hyuns/Desktop/LDAModel/'
    classifier_list = sorted(glob.glob(Classifier_path + '*.pickle'),
                             key=os.path.getmtime,
                             reverse=True)
    print("classifer:", classifier_list[0])

    lda = joblib.load(classifier_list[0])

    root_path = '/Users/hyuns/Desktop/HGU/2020-2/Capstone/Drone Project/EEGData/VR300_Data/0729/hs/'
    resampleRate = 100

    mat_path = root_path + 'Online/'
    current_list = sorted(glob.glob(mat_path + '*.mat'),
                          key=os.path.getmtime,
                          reverse=True)
    score = 0

    for mat_file in current_list:
        print(mat_file)
        ans = mat_file[-5]

        mat = hdf5storage.loadmat(mat_file)
        channelNames = mat['channelNames']
        eegData = mat['eegData']
        samplingFreq = mat['samplingFreq']
        samplingFreq = samplingFreq[0, 0]
        stims = mat['stims']
        channelNum = channelNames.shape
        channelNum = channelNum[1]
        eegData = np.transpose(eegData)
        buttonNum = 7

        #Bandpass Filter
        eegData = butter_bandpass_filter(eegData,
                                         0.1,
                                         30,
                                         samplingFreq,
                                         order=4)

        #Epoching
        epochSampleNum = int(np.floor(1.0 * samplingFreq))
        offset = int(np.floor(0.0 * samplingFreq))
        baseline = int(np.floor(1.0 * samplingFreq))

        ####### averaging whole epochs
        Epochs_Aver = np.zeros((buttonNum, channelNum, epochSampleNum))
        Epochs_final = np.zeros((buttonNum, channelNum, resampleRate))

        featureNum = channelNum * resampleRate

        for i in range(buttonNum):
            Epochs_Aver[i] = Epoching(eegData, stims, (i + 1), samplingFreq,
                                      channelNum, epochSampleNum, offset,
                                      baseline)
            Epochs_final[i] = resampling(Epochs_Aver[i], resampleRate,
                                         channelNum)

        Features = Convert_to_FeatureVector(Epochs_final, buttonNum,
                                            featureNum)

        Answers = lda.decision_function(Features)
        answer = np.argmax(Answers) + 1

        print(Answers)
        if int(ans) == int(answer):
            score = score + 1
        print('order: ', ans, 'predict: ', answer)

    print('score:', score)
Code example #21
def h5_to_panda(_file, _id):
    if _id == "PI":
        thisid = "Number_of_sips"
    else:
        thisid = _id
    dataid = "data2/" + thisid
    pid = "PVALS/" + thisid
    out = hdf5storage.loadmat(_file, variable_names=[dataid, pid, "LABELS"])

    datapoints = unrv_data(out[dataid])
    pvals = unrv_data(out[pid])

    labels = unrv_labels(out["LABELS"])
    labels = [label[0,0] for label in labels]
    labels[0] = "control"

    yeast_data = datapoints[0]
    sucrose_data = datapoints[1]
    yeast_ps = pvals[0]
    sucrose_ps = pvals[1]
    numdtpoints = yeast_data.size-np.count_nonzero(~np.isnan(yeast_data))
    if _id == "PI":
        PIout = {"Label": [], "Data": [], "Median": [], "Signif": []}
        contr = (yeast_data[:, 0] - sucrose_data[:, 0]) / (yeast_data[:, 0] + sucrose_data[:, 0])
        Pvals = {}
        for col in range(yeast_data.shape[1]):      # different labels
            PIcol = (yeast_data[:, col] - sucrose_data[:, col]) / (yeast_data[:, col] + sucrose_data[:, col])
            PImedian = np.nanmedian(PIcol)
            PIcol = PIcol[~np.isnan(PIcol)]
            s, PIpval = stat.ranksums(contr, PIcol)
            if np.isnan(PIpval):
                PIpval = 1
            Pvals[labels[col]] = PIpval
            for row in range(yeast_data.shape[0]):      # different datapoints same label
                if ~np.isnan(yeast_data[row, col]) and ~np.isnan(sucrose_data[row, col]):
                    PIout["Label"].append(labels[col])
                    PIout["Data"].append(PIcol[row])
                    PIout["Median"].append(PImedian)
                    PIout["Signif"].append("Yes" if math.log10(1./PIpval)>2 else "No")
        PIout = pd.DataFrame(PIout)
        return PIout, Pvals
    else:
        Yout = {"Label": [], "Data": [], "Median": [], "Signif": []}
        Sout = {"Label": [], "Data": [], "Median": [], "Signif": []}
        for row in range(yeast_data.shape[0]):      # different datapoints same label
            for col in range(yeast_data.shape[1]):      # different labels
                if ~np.isnan(yeast_data[row, col]):
                    Yout["Label"].append(labels[col])
                    Yout["Data"].append(yeast_data[row, col])
                    Yout["Median"].append(np.nanmedian(yeast_data[:,col]))
                    Yout["Signif"].append("Yes" if math.log10(1./yeast_ps[col])>2 else "No")
                if ~np.isnan(sucrose_data[row, col]):
                    Sout["Label"].append(labels[col])
                    Sout["Data"].append(sucrose_data[row, col])
                    Sout["Median"].append(np.nanmedian(sucrose_data[:,col]))
                    Sout["Signif"].append("Yes" if np.log10(1./sucrose_ps[col])>2 else "No")
        Ydf = pd.DataFrame(Yout)
        Sdf = pd.DataFrame(Sout)
        Pvals = {}
        for ind, label in enumerate(labels):
            Pvals[label] = [yeast_ps[ind], sucrose_ps[ind]]
        return [Ydf, Sdf], Pvals
Code example #22

# In[4]:


vv = 'v6'


# # Load some files for easier processing

# ## get the retrieval results

# In[5]:


m_dict = hs.loadmat(fp+'20130913_retrieval_output.mat')


# In[6]:


m_dict.keys()


# In[7]:


if not 'emas_tau_full' in vars():
    print 'not defined, loading from file'
    emas_tau_full = m_dict['emas'][1]; emas_ref_full = m_dict['emas'][3]; emas_utc_full = m_dict['emas'][5]
    modis_tau = m_dict['modis'][1]; modis_ref = m_dict['modis'][3]
Code example #23
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import keras
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
import random
import numpy as np
from matplotlib import pyplot as p
from scipy.io import loadmat, savemat

import hdf5storage

'TRAINING SET'
# load train set from .mat files
y_trainbig = hdf5storage.loadmat('../data/test_x_big.mat')
y_trainbig = y_trainbig['g']
y_train = y_trainbig[:, 50000:300000]
del y_trainbig
x_trainbig = hdf5storage.loadmat('../data/test_y_big.mat')
x_trainbig = x_trainbig['s']
x_train = x_trainbig[0:3, 50000:300000]
del x_trainbig

'shuffle data in the training set'
indi = np.arange(y_train.shape[1])
np.random.shuffle(indi)
y_train = y_train[:, indi]
x_train = x_train[:, indi]

'TEST SET'
Code example #24
def readData(input_file, sliceNo):
    if os.path.isdir(input_file):
        fl = [os.path.join(input_file, f)
              for f in os.listdir(input_file)
              if os.path.isfile(os.path.join(input_file, f))]
        print('{:} is a directory.'.format(input_file))
        if sliceNo is not None:
            if sliceNo == 'mean':
                # read each file and average
                data0 = np.load(fl[0])
                data0 = np.zeros(data0.shape, dtype=np.float64, order='F')
                for f in fl:
                    data0 = data0 + np.load(f)
                paData = data0 / len(fl)
            else:
                try:
                    sliceNo = int(sliceNo)
                    paData = np.load(
                        os.path.join(input_file,
                                     '{:06d}.npy'.format(sliceNo)))
                    paData = paData.astype(np.float64)
                except ValueError:  # assuming a list was given
                    sliceNo = sorted([int(v) for v in sliceNo.split()])
                    data0 = np.load(
                        os.path.join(input_file,
                                     '{:06d}.npy'.format(sliceNo[0])))
                    paData = np.zeros((data0.shape[0], data0.shape[1],
                                       sliceNo[1] - sliceNo[0]),
                                      dtype=np.float64, order='F')
                    paData[:, :, 0] = data0
                    for ind in range(sliceNo[0] + 1, sliceNo[1]):
                        paData[:, :, ind - sliceNo[0]] =\
                            np.load(os.path.join(input_file,
                                                 '{:06d}.npy'.format(ind)))
        else:
            fl = sorted(fl)
            data0 = np.load(fl[0])
            paData = np.zeros((data0.shape[0], data0.shape[1], len(fl)),
                              dtype=np.float64, order='F')
            for i in range(len(fl)):
                f = fl[i]
                paData[:, :, i] = np.load(f)
                ReconUtility.updateProgress(i+1, len(fl))
    else:
        (basename, ext) = os.path.splitext(input_file)
        in_format = ext[1:]
        # read out data
        if in_format == 'h5':
            print('Reading data from {:}'.format(input_file))
            f = h5py.File(input_file, 'r')
            paData = np.array(f['data'], order='F')
            f.close()
            print('Done loading.')
        elif in_format == 'npy':
            paData = np.load(input_file)
            paData = np.copy(paData, order='F')
        elif in_format == 'mat':
            variableName = input('Variable in mat: ')
            paData = np.array(h5.loadmat(input_file)[variableName], order='F')
        else:
            print('input format %s not supported' % in_format)
            return
        if sliceNo is not None:
            if sliceNo == 'mean':
                print('reconstructing averaged')
                paData = np.array(np.mean(paData, axis=2), order='F')
            else:
                sliceNo = int(sliceNo)
                print('reconstructing slice #{:d}'.format(sliceNo))
                paData = np.copy(paData[:, :, sliceNo], order='F')
        paData = paData.astype(np.float64)
    return paData
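Hypothetical calls into readData above, with made-up paths and slice arguments, showing the input modes the function handles (directory of .npy slices, single file, and the 'mean' option):

# File and directory names below are illustrative only.
vol = readData('scan_volume.h5', None)   # whole array from an HDF5 file's 'data' dataset
one = readData('slices_dir', '42')       # loads slices_dir/000042.npy
avg = readData('slices_dir', 'mean')     # averages every .npy slice in the folder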
Code example #25
class doc3dSELoader(data.Dataset):
    pass


if __name__ == "__main__":
    ROOT = 'C:/Users/yuttapichai.lam/dev-environment/dataset/'
    FNAME = '1_1_2-pm_Page_144-Tv60001'
    img_path = ROOT + 'img/' + FNAME + '.png'
    img = m.imread(img_path, mode='RGB')
    # img = np.array(img, dtype=np.uint8)
    norm_path = ROOT + 'norm/' + FNAME + '.exr'
    norm = cv2.imread(norm_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
    norm = cv2.cvtColor(norm, cv2.COLOR_RGB2BGR)
    # norm = np.array(norm, dtype=np.float)
    bm_path = ROOT + 'bm/' + FNAME + '.mat'
    bm = h5.loadmat(bm_path)['bm']
    # bm = np.array(bm, dtype=np.float)

    img = img[:, :, ::-1]
    im = img.transpose((2, 0, 1))
    im = im.astype(np.float) / 255.0
    im = np.expand_dims(im, 0)
    im = torch.from_numpy(im).float()


    bm = bm / np.array([448, 448])
    bm = (bm - 0.5) * 2
    bm0 = bm[:, :, 0]
    bm1 = bm[:, :, 1]
    bm = np.stack([bm0, bm1], axis=-1)
    # bm = np.reshape(bm, (1, 448, 448, 2))
Code example #26
File: ttideNew.py  Project: jklymak/ttide15
    f=1.e-4
    omega = Hk[3]

    x,z,H,P,debug=sr.SolveRefl(k=k,Nsq0=Nsq0,z0=z0,omega=omega*f,f=f,wall=True,x=xmod*1e3,
                           H=H,J=192,Incoming=False)
    ind = np.where(x>(x[-1]-70e3))[0]
    res=np.mean(np.abs(np.real(P[:,ind]*np.exp(1j*0.)))*H[np.newaxis,ind])
    return res,k

if __name__ == '__main__':
    todo = ['Shelf100km','Shelf1km03','Shelf1km04','Shelf020km']
    Hwkb=[]

    xmod=np.linspace(0.,260.,261)
    for nn,td in enumerate(todo):
        D = hdf5storage.loadmat('../ttide15/Tas3d/%s/Diags0360.mat'%td)
        H = D['Depth'].transpose()[:,-1]
        x = -D['x']/1e3-30.+xmod[-1]
        ray=mf.loadmatbunch ('../TasmaniaRays.mat')
        ray = ray['ray']
        z=np.arange(0.,5000.,2.)

        N=  np.interp(z,ray['z'],np.sqrt(ray['N2']))
        N0 = np.mean(N)
        Hwkb.append(np.interp(-xmod,-x,H))

    lam = np.linspace(30,180.0,140)*1e3
    ks = np.pi*2./lam
    oms = np.linspace(1.,2.,3)
    oms = oms[1:]
    z0 = np.linspace(-6500,0.,1000)
Code example #27
from mne.time_frequency import tfr_morlet
import math
from gonogo.config import *

sid = 6  #4
l_freq = 0.5
h_freq = 2
data_dir = '/Volumes/Samsung_T5/data/ruijin/gonogo/preprocessing/P' + str(sid)
plot_dir = data_dir + '/erpPlots/'
if not os.path.exists(plot_dir):
    os.makedirs(plot_dir)

#Session_num,UseChn,EmgChn,TrigChn = get_channel_setting(sid)
#original_fs=[Frequencies[i,1] for i in range(Frequencies.shape[0]) if Frequencies[i,0] == pn][0]
loadPath = data_dir + '/preprocessing/preprocessingv2.mat'
mat = hdf5storage.loadmat(loadPath)
fs = mat['Fs']
rtime = mat['ReactionTime']
rtime = np.concatenate((rtime[0, 0], rtime[0, 1]), axis=0)
data = mat['DATA']
data = np.concatenate((data[0, 0], data[0, 1]), axis=0)  #(2160440, 63)
events = mat['Trigger']
events = np.concatenate((events[0, 0], events[0, 1]), axis=0)  # two sessions
events[:, [1, 2]] = events[:, [
    2, 1
]]  # swap 1st and 2nd column to: timepoint, duration, event code
events = events.astype(int)

del mat

chn_num = data.shape[1]
Code example #28
dds = ['20160827','20160830','20160831','20160902','20160904','20160906','20160908',
       '20160910','20160912','20160914','20160918','20160920','20160924','20160925','20160927']


# In[4]:

rts = []
sps = []


# In[5]:

for daystr in dds:
    print daystr
    rt = hs.loadmat(fp+'{}_zen_cld_retrieved.mat'.format(daystr))
    s = sio.loadmat(fp+'4STAR_{}starzen.mat'.format(daystr))
    sp = Sp.Sp(s)
    rts.append(rt)
    sps.append(sp)


# ## Load the cloud probe incloud flag

# In[6]:

from load_utils import mat2py_time,toutc


# In[7]:
Code example #29
# ## Load the 4STAR starsun

# In[44]:

f_star = fp+'data\\{}starsun.mat'.format(daystr)


# In[45]:

try:
    s = sio.loadmat(f_star+'_small.mat')
except IOError:
    s = sio.loadmat(f_star)
except NotImplementedError:
    s = hs.loadmat(f_star+'_small.mat')


# In[46]:

s.keys()


# In[47]:

s['utc'] = lm.toutc(lm.mat2py_time(s['t']))


# In[48]:

s['tau_aero'].shape
Code example #30
    a0 = nearest_neighbor(star_a[i]['Start_UTC'],star_a[i]['AOD_polycoef_a0'],ssfr_a[i]['Start_UTC'],dist=3.0/3600.0)
    ssfr_a[i]['a0'] = a0


# In[220]:


ssfr_a[0]['flagacaod'].shape,ssfr_a[0]['Start_UTC'].shape


# ## Load the LUT for 2wvl reflectance retrieval

# In[30]:


lut = hs.loadmat(fp+'rtm/v5_irr_ORACLES_lut.mat')


# In[31]:


lut.keys()


# ## Combine into one array

# In[249]:


nm = ssfr_a[1].keys()
Code example #31
       '20160910','20160912','20160914','20160918','20160920','20160924','20160925','20160927']


# In[5]:


rts = []
sps = []


# In[6]:


for daystr in dds:
    print daystr
    rt = hs.loadmat(fp+'{}_zen_cld_retrieved.mat'.format(daystr))
    s = sio.loadmat(fp+'4STAR_{}starzen.mat'.format(daystr))
    sp = Sp.Sp(s)
    rts.append(rt)
    sps.append(sp)


# ## Load the cloud probe incloud flag

# In[7]:


from load_utils import mat2py_time,toutc


# In[8]:
Code example #32
suffix = ""
if(N_EV <= 11): # +1 aggregator
    suffix = "_10"
elif(N_EV <= 101): # +1 aggregator
    suffix = "_100"
elif(N_EV <= 1001):
    suffix = "_1000"
elif(N_EV <= 10001):
    suffix = "_10000"
elif(N_EV <= 100001):
    suffix = "_100000"
elif(N_EV <= 1000001):
    suffix = "_1000000"
               
file_aggr = DATA_DIR + '/Aggregator/aggregator' + suffix + '.mat'
aggr = hdf5storage.loadmat(file_aggr)

D = aggr['D'][()]

                        
'''
                        
data = sio.loadmat(name)
#data = h5py.File(name, 'r') # open read-only

for key,val in data.items() :
       
    if(key == 'r_norm'):       
       r_norm = data[key][()]
          
    if(key == 's_norm'):
Code example #33
File: app.py  Project: rinadwih27/iforest
def display():
    global filename_pca
    if os.path.isfile(os.path.join('static/img/', filename_pca)):
        os.remove(os.path.join('static/img/', filename_pca))
    if session['data'].split('.')[-1] == 'csv':
        df = pd.read_csv(session['data'])
    elif session['data'].split('.')[-1] == 'mat':
        mat = hd.loadmat(session['data'])
        X = pd.DataFrame(mat['X'])
        Y = pd.DataFrame(mat['y'])
        Y.rename(columns={0: 'CLASS'}, inplace=True)
        df = pd.concat([X, Y], axis=1)
    shape = df.shape

    desc = df.describe().values
    lstCol = ['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max']
    lstc = pd.DataFrame(lstCol)
    descc = pd.DataFrame(desc)
    lstc.rename(columns={0: 'Stat'}, inplace=True)
    descr = pd.concat([lstc, descc], axis=1)
    print(descr)

    # -- Standarisasi Data --
    kelas = df.columns[-1]
    # print(df.columns)
    sb_x = df.iloc[:, :-2]
    sb_y = df.loc[:, kelas]
    sb_x = StandardScaler().fit_transform(sb_x)

    pca = PCA(n_components=2)
    attr_x = pca.fit_transform(sb_x)
    # print(attr_x)
    red_data = pd.DataFrame(attr_x, columns=['A', 'B'])
    print(red_data.shape)
    print(df.loc[:, kelas].shape)
    print(df.loc[:, kelas])

    finalData = pd.concat([red_data, df.loc[:, kelas]], axis=1)

    # print(red_data.values)

    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(1, 1, 1)
    ax.set_xlabel('A', fontsize=15)
    ax.set_ylabel('B', fontsize=15)
    outcomes = [1, 0]
    colors = ['r', 'g']
    for Outcome, color in zip(outcomes, colors):
        index = finalData[kelas] == Outcome
        ax.scatter(finalData.loc[index, 'A'],
                   finalData.loc[index, 'B'],
                   c=color,
                   s=50)
        ax.legend(outcomes)
        ax.grid()

    #  Random filename
    filename_pca = ''.join(
        random.choices(string.ascii_uppercase + string.digits, k=5))
    filename_pca = filename_pca + '.png'
    # print(filename)
    plt.savefig(os.path.join('static/img/', filename_pca))
    filepath = 'img/' + filename_pca

    return render_template(
        'display.html',
        tables=[
            df.to_html(table_id='dataset',
                       border=0,
                       classes='display table table-striped table-hover')
        ],
        titles=df.columns.values,
        data=df.values,
        column=df.columns,
        shape=shape,
        descr=descr.values,
        filepath=filepath)
Code example #34
days = ['']


# In[7]:


day = '20160914'


# ## Load 4STAR gap distance calculations for ORACLES 2016

# In[9]:


gap = hs.loadmat(fp+'ORACLES_2016_gap_distance.mat')


# In[10]:


gap['ldelta_alt'].shape


# In[287]:


gap['days'] = [i[0] for i in gap['ldelta_lat_days']]


# In[303]:
Code example #35
import numpy as np
from hdf5storage import loadmat
from os.path import join
	
import sys
imset = sys.argv[1].lower().capitalize()

DS_PATH = 'D:/datasets/processed/voc2012' 
GT_PATH = join(DS_PATH, 'Truth', imset)
PRED_PATH = join(DS_PATH, 'Deeplab_Prediction', imset)

ds_info = loadmat(join(DS_PATH, 'dataset_info.mat'))
num_img = 350

if imset.lower() != 'val':
	num_img = 1449-num_img

LGT_FMT = join(PRED_PATH, imset.lower()+'_%06d_logits.mat')
LGT_MAT_NAME = 'logits_img'

GT_FMT = join(GT_PATH, imset.lower()+'_%06d_pixeltruth.mat')
GT_MAT_NAME = 'truth_img'
	
conf_res = 0.05
conf_hist = np.zeros((int(1./conf_res)), dtype=np.uint64)
	
for idx in range(1, num_img+1):
	gt = loadmat(GT_FMT % idx)[GT_MAT_NAME].ravel()
	mask = (gt>0) & (gt<255)

	logits = loadmat(LGT_FMT % idx)[LGT_MAT_NAME][...,1:].reshape(-1, 20)
Code example #36
hs.savemat(fp+'/aod_ict_2017/{vv}/all_aod_ict_{vv}_2017.mat'.format(vv=vv),ar)


# ## Optionally load the file

# In[8]:


import hdf5storage as hs


# In[9]:


ar = hs.loadmat(fp+'/aod_ict_2017/{vv}/all_aod_ict_{vv}_2017.mat'.format(vv=vv))


# ## Plot a histogram of all the AOD

# In[10]:


from plotting_utils import prelim


# In[198]:


plt.figure()
plt.hist(ar['AOD0501'][ar['fl']],bins=30,range=(0,1.0),alpha=0.5,normed=False,edgecolor='None',color='g',label='600-1800 m')
Code example #37
File: tang_feat.py  Project: ilyakava/pyfst
def tang_run_acc(data, labels, traintestfilenames=None):
    """
    """
    [height, width, nbands] = data.shape


    all_pixels = np.array(list(itertools.product(range(width),range(height))))
    labelled_pixels = np.array(filter(lambda (x,y): labels[y,x] != 0, all_pixels))
    flat_labels = labels.transpose().reshape(height*width)
    nlabels = len(set(flat_labels.tolist())) - 1

    padded_data = np.pad(data, ((9,9),(9,9),(9,9)), 'wrap')

    print('requesting %d MB memory' % (labelled_pixels.shape[0] * 271*nbands * 4 / 1000000.0))
    labelled_pix_feat = np.zeros((labelled_pixels.shape[0], 271*nbands), dtype=np.float32)
    
    def compute_features():
        x = tf.placeholder(tf.float32, shape=(19,19,nbands+18))
        feat = tang_net(x)
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())

        for pixel_i, pixel in enumerate(tqdm(labelled_pixels)):
            # this iterates through columns first
            [pixel_x, pixel_y] = pixel
            subimg = padded_data[pixel_y:(pixel_y+19), pixel_x:(pixel_x+19), :]
        
            feed_dict = {x: subimg}
            labelled_pix_feat[pixel_i,:] = sess.run(feat, feed_dict)
    compute_features()

    for traintestfilename in traintestfilenames:
        mat_contents = None
        try:
            mat_contents = sio.loadmat(os.path.join(DATA_PATH, traintestfilename))
        except:
            mat_contents = hdf5storage.loadmat(os.path.join(DATA_PATH, traintestfilename))
        train_mask = mat_contents['train_mask'].astype(int).squeeze()
        test_mask = mat_contents['test_mask'].astype(int).squeeze()
        # resize train/test masks to labelled pixels
        train_mask_skip_unlabelled = train_mask[flat_labels!=0]
        test_mask_skip_unlabelled = test_mask[flat_labels!=0]

        # get training set
        trainY = flat_labels[train_mask==1]
        trainX = labelled_pix_feat[train_mask_skip_unlabelled==1,:]

        print('training now')
        start = time.time()
        clf = SVC(kernel='linear')
        clf.fit(trainX, trainY)
        end = time.time()
        print(end - start)

        # now test
        test_chunk_size = 1000
        testY = flat_labels[test_mask==1]
        Yhat = np.zeros(testY.shape)
        testX = labelled_pix_feat[test_mask_skip_unlabelled==1,:]
        C = np.zeros((nlabels,nlabels))
        print('testing now')
        for i in tqdm(range(0,len(testY),test_chunk_size)):
            p_label = clf.predict(testX[i:i+test_chunk_size,:]);
            Yhat[i:i+test_chunk_size] = np.array(p_label).astype(int)
            C += confusion_matrix(testY[i:i+test_chunk_size], p_label, labels=list(range(1,nlabels+1)))

        pred_image = np.zeros(flat_labels.shape)
        pred_image[test_mask==1] = Yhat

        mat_outdata = {}
        mat_outdata[u'metrics'] = {}
        mat_outdata[u'metrics'][u'CM'] = C
        mat_outdata[u'pred_image'] = pred_image.reshape((width, height)).transpose()
        hdf5storage.write(mat_outdata, filename=os.path.join(DATA_PATH, traintestfilename+'_117_WST3D_expt.mat'), matlab_compatible=True)
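The final hdf5storage.write call above stores the confusion matrix and prediction image as a MATLAB-compatible v7.3 file. A minimal standalone sketch of that pattern (file name and contents are illustrative), assuming default options so each top-level key becomes a MATLAB variable:

import numpy as np
import hdf5storage

# Sketch only: write nested results so MATLAB (or loadmat) can read them back.
mat_outdata = {u'metrics': {u'CM': np.eye(3)}, u'pred_image': np.zeros((4, 5))}
hdf5storage.write(mat_outdata, filename='expt_results.mat', matlab_compatible=True)
back = hdf5storage.loadmat('expt_results.mat')  # back['metrics'], back['pred_image']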
Code example #38
File: data_utils.py  Project: uberstig/ChaosMagPy
def load_matfile(filepath, variable_names=None, **kwargs):
    """
    Load mat-file and return dictionary.

    Function loads mat-file by traversing the structure converting data into
    low-level numpy arrays of different types. There is no guarantee that any
    kind of data is read in correctly. The data dtype can also vary depending
    on the mat-file (v7.3 returns floats instead of integers). But it should
    work identically for v7.3 and prior mat-files. Arrays are squeezed if
    possible. Relies on the :mod:`hdf5storage` package.

    Parameters
    ----------
    filepath : str
        Filepath and name of mat-file.
    variable_names : list of strings
        Top-level variables to be loaded.
    **kwargs : keywords
        Additional keyword arguments are passed to :func:`hdf5storage.loadmat`.

    Returns
    -------
    data : dict
        Dictionary containing the data as dictionaries or numpy arrays.

    """

    # define a recursively called function to traverse structure
    def traverse_struct(struct):

        # for dictionaries, iterate through keys
        if isinstance(struct, dict):
            out = dict()
            for key, value in struct.items():
                out[key] = traverse_struct(value)
            return out

        # for ndarray, iterate through dtype names
        elif isinstance(struct, np.ndarray):

            # collect dtype names if available
            names = struct.dtype.names

            # if no fields in array
            if names is None:
                if struct.dtype == np.dtype('O') and struct.shape == (1, 1):
                    return traverse_struct(struct[0, 0])
                else:
                    return struct.squeeze()

            else:  # if there are fields, iterate through fields
                out = dict()
                for name in names:
                    out[name] = traverse_struct(struct[name])
                return out

        else:
            return struct

    output = hdf.loadmat(filepath, variable_names=variable_names, **kwargs)

    # loadmat returns dictionary, go through keys and call traverse_struct
    for key, value in output.items():
        if key.startswith('__') and key.endswith('__'):
            pass
        else:
            output[key] = traverse_struct(value)

    return output
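A hypothetical call to the helper above (file and variable names are made up for illustration); nested MATLAB structs come back as plain nested dicts, with arrays squeezed:

# Assumed file/variable names -- illustration only.
data = load_matfile('model_2020.mat', variable_names=['model'])
coeffs = data['model']['coeffs']  # a struct field, returned as a squeezed numpy array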
Code example #39
#!/usr/bin/env python
# coding: utf-8

# In[3]:



import hdf5storage
import pandas as pd


dataname='alldata_1316_fixed.mat'
mat = hdf5storage.loadmat(dataname)
pdata = pd.DataFrame(mat['Data'],columns = ['DATE','PERMNO','VOL','SHROUT','retadj','LME','ret','prca','OPENPRC','divamt','facpra','facshr','SHRCD','EXCHCD','DLRET','DLPRC','DISTCD','PERMCO','weight_port','lprc','ME','ticker_idx','SIZEPORT','BTMPORT','OPPORT','INVPORT','RRGRP'])
# startdate = 20150901
# enddate = 20150901
# ind=(pdata['DATE'] >= startdate) & (pdata['DATE']  <= enddate)
DataPc=pdata
entry=[1,2,19,8,5,10,11,12,6,15,16,17,20,21,23,25,9]
#entry =  [1,2,19,8,5,9,10,11,12,6,15,16,17,20,21,22,23,24,25,26,27]
entryew=[i-1 for i in entry]
DataPcnew=DataPc.iloc[:,entryew].copy()
#DataPcnew.columns.values.tolist()
DataPcnew['prca']=DataPcnew['prca'].abs()
DataPcnew['lprc']=DataPcnew['lprc'].abs()


# In[11]:


Code example #40
File: filtering.py  Project: Abousamah/SADA
images = read_images_to_np(path,
                           n,
                           n,
                           extension="all",
                           allowmax=True,
                           maxnbr=1000,
                           d_type=np.float32,
                           mode="RGB")
gray_images = [cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) for image in images]
plt.figure()
plt.imshow(gray_images[RANDOM_NO], cmap='gray')
plt.title("Filtering by matrix multiplicatin")
plt.show(block=False)
print("finished loading the images\n")
if LOAD_MAT:
    mat = hdf5storage.loadmat('filtering.mat')
    k_padded = mat["k_padded"]
    A = mat["A_unfolded"]
    K = mat["kernel"]
    print("the Kernel\n", K, "\n\n")
    # print("the Kernel padded \n",k_padded,"\n\n")
    # k_padded = sio.loadmat("filtering.mat")["kernel"]
    # A = sio.loadmat("filtering.mat")["A_unfolded"]
    X = gray_images[RANDOM_NO]
    x = X.reshape(-1, 1)

# K = np.array(range(1,m**2+1)).reshape(m,m)
else:
    K = np.ones((m, m), np.float32)
    k_padded = np.pad(K, ((skip, skip), (skip, skip)), 'constant')
    print("the Kernel\n", K, "\n\n")
コード例 #41
                        help='saving path that is a folder')
    parser.add_argument('--task_name',
                        type=str,
                        default='track2',
                        help='task name for loading networks, saving, and log')
    # NTIRE2020_Validation_Clean    NTIRE2020_Validation_RealWorld
    opt = parser.parse_args()

    sample_folder = os.path.join(opt.val_path, opt.task_name)
    utils.check_path(sample_folder)

    imglist = utils.get_jpgs(opt.path1)
    for imgname in imglist:
        # Read
        data_path1 = os.path.join(opt.path1, imgname)
        data1 = hdf5.loadmat(data_path1)['cube']
        data_path2 = os.path.join(opt.path2, imgname)
        data2 = hdf5.loadmat(data_path2)['cube']
        data_path3 = os.path.join(opt.path3, imgname)
        data3 = hdf5.loadmat(data_path3)['cube']
        data_path4 = os.path.join(opt.path4, imgname)
        data4 = hdf5.loadmat(data_path4)['cube']
        data_path5 = os.path.join(opt.path5, imgname)
        data5 = hdf5.loadmat(data_path5)['cube']
        data_path6 = os.path.join(opt.path6, imgname)
        data6 = hdf5.loadmat(data_path6)['cube']
        data_path7 = os.path.join(opt.path7, imgname)
        data7 = hdf5.loadmat(data_path7)['cube']
        data_path8 = os.path.join(opt.path8, imgname)
        data8 = hdf5.loadmat(data_path8)['cube']
        data_avg = (data1 + data2 + data3 + data4 + data5 + data6 + data7 +
コード例 #42
ファイル: test.py プロジェクト: dorsal1464/hwsec_preprocess
                        f[np.where(f < 500)],
                        Sxx[np.where(f < 500)[0], :],
                        shading='gouraud',
                        cmap='Greys',
                        norm=Normalize())
fig.colorbar(cf, ax=axes[2])
axes[2].set_ylabel('Frequency [MHz]')
axes[2].set_xlabel('Time samples')
axes[2].set_title("rp_2 spectogram")
plt.tight_layout(pad=1.2)
plt.savefig("spectogram.pdf")
plt.show()

path = 'Z:\\traces\\CMOS\\'

mat = hdf5storage.loadmat(path + 'fullssa.mat')
# np.complex64
traces = (mat['traces'])
Y = mat['Y']

Y = np.reshape(Y, (np.size(Y), ))

SAMPLES = np.shape(traces)[1]
Queries = np.shape(traces)[0]
from main_fft import plot_roc

plot_roc(traces, Y, None, 40, 'fullssa_base', [0, 1000])
#plot_roc(traces, Y, "Z:\\filters\\shuffled_rp_8\\filter_BPF.mat", 40, 'rp8_bpf', [480, 800])
#plot_roc(traces, Y, "Z:\\filters\\shuffled_rp_8\\filter_SNR_FFT.mat", 40, 'rp8_snr', [480, 800])
exit(0)
コード例 #43
from grasp.process.utils import getRawData, get_trigger
'''
Function: pick out the non-signal channels and the trigger channel by plotting and visual inspection.
Usage:
1. Use - to scale down the plot.
'''

sid = 16
sessions = 4  # 4 sessions
movements = 4  # 4 movements
session = 0  # evaluate the channels on one session

print("Read raw data from disk.")
seegfile = data_dir + 'PF' + str(sid) + '/SEEG_Data/PF' + str(
    sid) + '_F_' + str(session + 1) + '.mat'
mat = hdf5storage.loadmat(seegfile)
raw = mat['Data']
#triggerRaw = mat['Data'][29, :]
fs = int(mat['Fs'][0][0])
chnRaw = mat[
    'ChnName']  # total channels: len(chnRaw)-->update the channel_settings.py
channels = list(chnRaw)
while not isinstance(channels[0], np.str_):
    channels = [channels[i][0] for i in range(len(chnRaw))]

#channels = np.asarray([chnRaw[i][0][0][0] for i in range(len(chnRaw))])  # list with len=126
#ch_names = [channelsName.strip() for channelsName in channels]
ch_index_str = [
    str(chi) + '_' + channels[chi].strip() for chi in [*range(len(channels))]
]
ch_index = [*range(len(channels))]  #147
コード例 #44
 def test_append(self):
     self.assertEqual(self.FileSample.fname, 'test_in_progress.mat')
     self.assertEqual(self.FileSample.n, 0)
     self.FileSample.append(np.matrix([[1], [2], [1], [2], [1], [1]]),
                            np.matrix([1]), 10)
     self.assertEqual(self.FileSample.n, 10)
     self.assertTrue(os.path.exists(self.FileSample.fname))
     from hdf5storage import loadmat
     x = loadmat(self.FileSample.fname)
     self.assertEqual(sorted(
         x.keys()), ['LnPDF_1', 'MTSpace_1', 'i', 'n', 'non_zero_samples'])
     self.assertEqual(x['n'], 10)
     self.FileSample.append(np.matrix([[1], [2], [1], [2], [1], [1]]),
                            np.matrix([1]), 20)
     self.assertEqual(self.FileSample.n, 30)
     self.assertTrue(os.path.exists(self.FileSample.fname))
     x = loadmat(self.FileSample.fname)
     self.assertEqual(sorted(x.keys()), [
         'LnPDF_1', 'LnPDF_2', 'MTSpace_1', 'MTSpace_2', 'i', 'n',
         'non_zero_samples'
     ])
     self.assertEqual(x['n'], 30)
     self.FileSample.append(np.matrix([[1], [2], [1], [2], [1], [1]]),
                            np.matrix([-np.inf]), 20)
     self.assertEqual(self.FileSample.n, 50)
     x = loadmat(self.FileSample.fname)
     self.assertEqual(sorted(x.keys()), [
         'LnPDF_1', 'LnPDF_2', 'MTSpace_1', 'MTSpace_2', 'i', 'n',
         'non_zero_samples'
     ])
     self.assertEqual(x['n'], 50)
     self.tearDown()
     self.setUp()
     self.assertEqual(self.FileSample.fname, 'test_in_progress.mat')
     self.assertEqual(self.FileSample.n, 0)
     self.FileSample.append(np.matrix([[1], [2], [1], [2], [1], [1]]),
                            np.matrix([1]), 10,
                            [{
                                'mu': np.array([[1, 2], [2, 1]]),
                                'sigma': np.array([[0, 0.1], [0.1, 0]])
                            }])
     self.assertEqual(self.FileSample.n, 10)
     self.assertTrue(os.path.exists(self.FileSample.fname))
     from hdf5storage import loadmat
     x = loadmat(self.FileSample.fname)
     self.assertEqual(sorted(x.keys()), [
         'LnPDF_1', 'MTSpace_1', 'i', 'n', 'non_zero_samples',
         'scale_factor_1'
     ])
     self.assertEqual(x['n'], 10)
     self.FileSample.append(np.matrix([[1], [2], [1], [2], [1], [1]]),
                            np.matrix([1]), 20,
                            [{
                                'mu': np.array([[1, 2], [2, 1]]),
                                'sigma': np.array([[0, 0.1], [0.1, 0]])
                            }])
     self.assertEqual(self.FileSample.n, 30)
     self.assertTrue(os.path.exists(self.FileSample.fname))
     x = loadmat(self.FileSample.fname)
     self.assertEqual(sorted(x.keys()), [
         'LnPDF_1', 'LnPDF_2', 'MTSpace_1', 'MTSpace_2', 'i', 'n',
         'non_zero_samples', 'scale_factor_1', 'scale_factor_2'
     ])
     self.assertEqual(x['n'], 30)
     self.FileSample.append(np.matrix([[1], [2], [1], [2], [1], [1]]),
                            np.matrix([-np.inf]), 20,
                            [{
                                'mu': np.array([[1, 2], [2, 1]]),
                                'sigma': np.array([[0, 0.1], [0.1, 0]])
                            }])
     self.assertEqual(self.FileSample.n, 50)
     x = loadmat(self.FileSample.fname)
     self.assertEqual(sorted(x.keys()), [
         'LnPDF_1', 'LnPDF_2', 'MTSpace_1', 'MTSpace_2', 'i', 'n',
         'non_zero_samples', 'scale_factor_1', 'scale_factor_2'
     ])
     self.assertEqual(x['n'], 50)
コード例 #45
## filter to deal with some 0 reflectances
h = np.ones((3, 3))
h[1, 1] = 0

## loop over all images
for f in list_file:
    cube_name = re.findall('\d+', f)[0]
    path_f = indexPath + '\\' + cube_name
    if not os.path.exists(path_f):
        os.makedirs(path_f)

#    pdb.set_trace()
    indices = dict()
    R = dict()
    data = hdf5storage.loadmat(os.path.join(hyperPath, f))['data']
    f_ = f.replace('_rd_rf.mat', '')
    for color in list_wv_color.keys():
        R.update({
            color:
            np.average(data[:, :,
                            idx_colors[color][0]:idx_colors[color][1] + 1],
                       axis=2)
        })

#    R_wv = {}
    for wv in list_wv:
        R[wv] = np.average(data[:, :, idx_wv[wv]], axis=2)
#    pdb.set_trace()

# getting rid of the "divided by zero" issue
コード例 #46
    'N_evals': 30,  #  Max epochs
    'L_rate_N1N2': 2e-4,
    'W_N1': 1e-4,
    'W_N2': 1e-4,
    'enc_layers': [300, 200],
    'dec_layers': [200],
    'AE_activation': 'tanh',
    'N1_layers': [80, 160, 320, 640, 320, 160, 80],
    'N2_layers': [80, 160, 320, 640, 320, 160, 80],
    'datastep': 10,
    'bat_n_all': True,
    'actual_epoch': 0,
}

## Load Data
PI = hdf5storage.loadmat('./data/PI_coma.mat')  # Point clouds Indexes
PI = np.squeeze(PI['PI']) - 1

data = hdf5storage.loadmat('./data/coma_FEM.mat')  # Load dataset
pix1 = data['meshes_noeye'].reshape(data['meshes_noeye'].shape[0],
                                    data['meshes_noeye'].shape[1], 3).astype(
                                        'float32')  # Vertices of the meshes
pix = pix1[:, PI]
outliers = np.asarray([6710, 6792, 6980]) - 1
remeshed = np.asarray(
    [820, 1200, 7190, 11700, 12500, 14270, 15000, 16300, 19180, 20000]) - 1
save_every = 100


# Define Chamfer
def distance_matrix(array1, array2):
コード例 #47
        other_mirror_loss = (np.sum(np.square(x_target_anchor_features - x_other_pair_features), axis=-1) + \
        np.sum(np.square(x_target_anchor_features - x_other_mirror_features), axis=-1)) / 2
    else:
        other_mirror_loss = np.sum(np.square(x_target_anchor_features -
                                             x_other_pair_features),
                                   axis=-1)

    # Store losses themselves
    global_target_loss.append(target_mirror_loss)
    global_int_loss.append(other_mirror_loss)

    # Get local directory
    local_dir = result_dir + '/target%d_sample%d' % (target_person,
                                                     best_anchor_idx)
    try:
        contents = hdf5storage.loadmat(local_dir + '/attack.mat')
        if mask_style == 'eyeglasses':
            # Load the results of the indirect attack (only for eyeglasses currently, can be anything else)
            try:
                contents_indirect = hdf5storage.loadmat(
                    'paper_attacks_indirect_eyeglasses/%s/%s/%s/target%d_sample%d/attack.mat'
                    % (submeta_folder, core_folder, core_weights,
                       target_person, best_anchor_idx))
                adv_indirect_loss = contents_indirect['adv_true_feature_loss']
                global_indirect_loss.append(adv_indirect_loss)

            except:
                print('Skipping target %d! No indirect attack found' %
                      target_person)
    except:
        print('Skipping target %d!' % target_person)
コード例 #48
def main():
    ############## seeg data ##########
    sid = 10  # 4
    fs = 1000
    class_number = 5
    Session_num, UseChn, EmgChn, TrigChn, activeChan = get_channel_setting(sid)

    loadPath = data_dir + 'preprocessing' + '/P' + str(
        sid) + '/preprocessing2.mat'
    mat = hdf5storage.loadmat(loadPath)
    data = mat['Datacell']
    channelNum = int(mat['channelNum'][0, 0])
    data = np.concatenate((data[0, 0], data[0, 1]), 0)
    del mat
    # standardization
    # no effect. why?
    if 1 == 1:
        chn_data = data[:, -3:]
        data = data[:, :-3]
        scaler = StandardScaler()
        scaler.fit(data)
        data = scaler.transform((data))
        data = np.concatenate((data, chn_data), axis=1)

    # stim0 is trigger channel, stim1 is trigger position calculated from EMG signal.
    chn_names = np.append(["seeg"] * len(UseChn), ["stim0", "emg", "stim1"])
    chn_types = np.append(["seeg"] * len(UseChn), ["stim", "emg", "stim"])
    info = mne.create_info(ch_names=list(chn_names),
                           ch_types=list(chn_types),
                           sfreq=fs)
    raw = mne.io.RawArray(data.transpose(), info)

    # gesture/events type: 1,2,3,4,5
    events0 = mne.find_events(raw, stim_channel='stim0')
    events1 = mne.find_events(raw, stim_channel='stim1')
    # events number should start from 0: 0,1,2,3,4, instead of 1,2,3,4,5
    events0 = events0 - [0, 0, 1]
    events1 = events1 - [0, 0, 1]

    # print(events[:5])  # show the first 5
    # Epoch from 4s before (idle) until 4s after (movement) stim1.
    raw = raw.pick(["seeg"])
    epochs = mne.Epochs(raw, events1, tmin=-4, tmax=4, baseline=None)
    # or epoch from 0s to 4s which only contain movement data.
    # epochs = mne.Epochs(raw, events1, tmin=0, tmax=4,baseline=None)

    epoch1 = epochs['0'].get_data(
    )  # 20 trials. 8001 time points per trial for 8s.
    epoch2 = epochs['1'].get_data()
    epoch3 = epochs['2'].get_data()
    epoch4 = epochs['3'].get_data()
    epoch5 = epochs['4'].get_data()
    list_of_epochs = [epoch1, epoch2, epoch3, epoch4, epoch5]
    total_len = list_of_epochs[0].shape[2]

    # validate=test=2 trials
    trial_number = [list(range(epochi.shape[0])) for epochi in list_of_epochs
                    ]  # [ [0,1,2,...19],[0,1,2...19],... ]
    test_trials = [random.sample(epochi, 2) for epochi in trial_number]
    # len(test_trials[0]) # test trials number
    trial_number_left = [
        np.setdiff1d(trial_number[i], test_trials[i])
        for i in range(class_number)
    ]

    val_trials = [
        random.sample(list(epochi), 2) for epochi in trial_number_left
    ]
    train_trials = [
        np.setdiff1d(trial_number_left[i], val_trials[i]).tolist()
        for i in range(class_number)
    ]

    # no missing trials
    assert [
        sorted(test_trials[i] + val_trials[i] + train_trials[i])
        for i in range(class_number)
    ] == trial_number

    test_epochs = [
        epochi[test_trials[clas], :, :]
        for clas, epochi in enumerate(list_of_epochs)
    ]  # [ epoch0,epoch1,epch2,epoch3,epoch4 ]
    val_epochs = [
        epochi[val_trials[clas], :, :]
        for clas, epochi in enumerate(list_of_epochs)
    ]
    train_epochs = [
        epochi[train_trials[clas], :, :]
        for clas, epochi in enumerate(list_of_epochs)
    ]

    wind = 500
    stride = 500
    X_train = []
    y_train = []
    X_val = []
    y_val = []
    X_test = []
    y_test = []

    for clas, epochi in enumerate(test_epochs):
        Xi, y = slide_epochs(epochi, clas, wind, stride)
        assert Xi.shape[0] == len(y)
        X_test.append(Xi)
        y_test.append(y)
    X_test = np.concatenate(X_test, axis=0)  # (1300, 63, 500)
    y_test = np.asarray(y_test)
    y_test = np.reshape(y_test, (-1, 1))  # (5, 270)

    for clas, epochi in enumerate(val_epochs):
        Xi, y = slide_epochs(epochi, clas, wind, stride)
        assert Xi.shape[0] == len(y)
        X_val.append(Xi)
        y_val.append(y)
    X_val = np.concatenate(X_val, axis=0)  # (1300, 63, 500)
    y_val = np.asarray(y_val)
    y_val = np.reshape(y_val, (-1, 1))  # (5, 270)

    for clas, epochi in enumerate(train_epochs):
        Xi, y = slide_epochs(epochi, clas, wind, stride)
        assert Xi.shape[0] == len(y)
        X_train.append(Xi)
        y_train.append(y)
    X_train = np.concatenate(X_train, axis=0)  # (1300, 63, 500)
    y_train = np.asarray(y_train)
    y_train = np.reshape(y_train, (-1, 1))  # (5, 270)
    chn_num = X_train.shape[1]

    train_set = myDataset(X_train, y_train)
    val_set = myDataset(X_val, y_val)
    test_set = myDataset(X_test, y_test)

    batch_size = 32
    train_loader = DataLoader(dataset=train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              pin_memory=False)
    val_loader = DataLoader(dataset=val_set,
                            batch_size=batch_size,
                            shuffle=True,
                            pin_memory=False)
    test_loader = DataLoader(dataset=test_set,
                             batch_size=batch_size,
                             shuffle=True,
                             pin_memory=False)

    ########## end  seeg #########################
    global args, enable_cuda
    ################################################################ INIT #################################################################################

    args = parser.parse_args()

    cwd = os.getcwd()
    #dpath=os.path.dirname(cwd)
    dpath = '/Volumes/Samsung_T5/data/braindecode/'
    result_dir = dpath + 'result/'
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    #Paths for data, model and checkpoint
    data_path = os.path.join(dpath, 'Data/')
    model_save_path = os.path.join(dpath, 'Models',
                                   'Model_GumbelregHighgamma_M' + str(args.M))
    checkpoint_path = os.path.join(
        dpath, 'Models', 'Checkpoint_GumbelregHighgamma_M' + str(args.M))
    if not os.path.isdir(os.path.join(dpath, 'Models')):
        os.makedirs(os.path.join(dpath, 'Models'))

    #Check if CUDA is available
    enable_cuda = torch.cuda.is_available()
    if (args.verbose):
        print('GPU computing: ', enable_cuda)

    #Set random seed
    if (args.seed == 0):
        args.seed = randint(1, 99999)

    #Initialize devices with random seed
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    training_accs = []
    val_accs = []
    test_accs = []

    #Create a vector of length epochs, decaying start_value to end_value exponentially, reaching end_value at end_epoch
    def exponential_decay_schedule(start_value, end_value, epochs, end_epoch):
        t = torch.FloatTensor(torch.arange(0.0, epochs))
        p = torch.clamp(t / end_epoch, 0, 1)
        out = start_value * torch.pow(end_value / start_value, p)

        return out
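
    # For intuition (illustrative numbers, not the values used below):
    # exponential_decay_schedule(10.0, 1.1, epochs=100, end_epoch=75) starts
    # at 10.0, decays geometrically to 1.1 by epoch 75, and stays at 1.1 for
    # the remaining epochs.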

    #Network loss function
    def loss_function(output, target, model, lamba, weight_decay):
        l = nn.CrossEntropyLoss()
        sup_loss = l(output, target)
        reg = model.regularizer(lamba, weight_decay)

        return sup_loss, reg

    #Create schedule for temperature and regularization threshold
    temperature_schedule = exponential_decay_schedule(args.start_temp,
                                                      args.end_temp,
                                                      args.epochs,
                                                      int(args.epochs * 3 / 4))
    thresh_schedule = exponential_decay_schedule(10.0, 1.1, args.epochs,
                                                 args.epochs)

    #Load data
    num_subjects = 5
    input_dim = [44, 1125]
    #train_loader1,val_loader1,test_loader1 = all_subject_loader_HGD(batch_size=args.batch_size,train_split=args.train_split,path=data_path,num_subjects=num_subjects)

    ################################################################ SUBJECT-INDEPENDENT CHANNEL SELECTION #################################################################################

    if (args.verbose):
        print('Start training')

    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    #Instantiate model
    model = SelectionNet(input_dim, args.M).float()
    if (enable_cuda):
        model.cuda()
    model.set_freeze(False)

    optimizer = torch.optim.Adam(model.parameters(), args.lr)

    prev_val_loss = 100
    patience_timer = 0
    early_stop = False
    epoch = 0
    fig, ax = plt.subplots()

    while epoch < args.epochs and not early_stop:

        #Update temperature and threshold
        model.set_thresh(thresh_schedule[epoch])
        model.set_temperature(temperature_schedule[epoch])

        #Perform training step
        train(train_loader, model, loss_function, optimizer, epoch,
              args.weight_decay, args.lamba, args.gradacc, args.verbose)
        val_loss = validate(val_loader, model, loss_function, epoch,
                            args.weight_decay, args.lamba, args.verbose)
        tr_acc, val_acc, test_acc = test(train_loader, val_loader, test_loader,
                                         model, loss_function,
                                         args.weight_decay, args.verbose)

        #Extract selection neuron entropies, current selections and probability distributions
        H, sel, probas = model.monitor()
        ax.plot(probas.detach().numpy())
        fig.savefig(result_dir + 'prob_dist' + str(epoch) + '.png')
        ax.clear()
        #fig.clear()

        #If selection convergence is reached, enable early stopping scheme
        if ((torch.mean(H.data) <= args.entropy_lim)
                and (val_loss > prev_val_loss - args.stop_delta)):
            patience_timer += 1
            if (args.verbose):
                print('Early stopping timer ', patience_timer)
            if (patience_timer == args.patience):
                early_stop = True
        else:
            patience_timer = 0
            H, sel, probas = model.monitor()
            torch.save(model.state_dict(), checkpoint_path)
            prev_val_loss = val_loss

        epoch += 1

    if (args.verbose):
        print('Channel selection finished')

    #Store subject independent model
    model.load_state_dict(torch.load(checkpoint_path))
    pretrained_path = str(model_save_path +
                          'all_subjects_channels_selected.pt')
    torch.save(model.state_dict(), pretrained_path)

    ################################################################ SUBJECT FINETUNING  #################################################################################
    ## freeze the selection layer and train the model other part
    if (args.verbose):
        print('Start subject specific training')

    for k in range(1, num_subjects + 1):

        if (args.verbose):
            print('Start training for subject ' + str(k))

        torch.manual_seed(args.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

        #Load subject independent model and freeze selection neurons
        model = SelectionNet(input_dim, args.M)
        model.load_state_dict(torch.load(pretrained_path))
        if (enable_cuda):
            model.cuda()
        model.set_freeze(True)

        #Load subject dependent data
        train_loader, val_loader, test_loader = within_subject_loader_HGD(
            subject=k,
            batch_size=args.batch_size,
            train_split=args.train_split,
            path=data_path)

        optimizer = torch.optim.Adam(model.parameters(), args.lr)

        prev_val_loss = 100
        patience_timer = 0
        early_stop = False
        epoch = 0
        while epoch < args.epochs and not early_stop:

            #Perform train step
            train(train_loader, model, loss_function, optimizer, epoch,
                  args.weight_decay, args.lamba, args.gradacc, args.verbose)
            val_loss = validate(val_loader, model, loss_function, epoch,
                                args.weight_decay, args.lamba, args.verbose)
            tr_acc, val_acc, test_acc = test(train_loader, val_loader,
                                             test_loader, model, loss_function,
                                             args.weight_decay, args.verbose)

            #Extract selection neuron entropies, current selections and probability distributions
            H, sel, probas = model.monitor()

            #Perform early stopping
            if (val_loss > prev_val_loss - args.stop_delta):
                patience_timer += 1
                if (args.verbose):
                    print('Early stopping timer ', patience_timer)
                if (patience_timer == args.patience):
                    early_stop = True
            else:
                patience_timer = 0
                torch.save(model.state_dict(), checkpoint_path)
                prev_val_loss = val_loss

            epoch += 1

        #Store model with lowest validation loss
        model.load_state_dict(torch.load(checkpoint_path))
        path = str(model_save_path + 'finished_subject' + str(k) + '.pt')
        torch.save(model.state_dict(), path)

        #Evaluate model
        tr_acc, val_acc, test_acc = test(train_loader, val_loader, test_loader,
                                         model, loss_function,
                                         args.weight_decay, args.verbose)
        training_accs.append(tr_acc)
        val_accs.append(val_acc)
        test_accs.append(test_acc)

################################################################ TERMINATION  #################################################################################

    print('Selection', sel.data)
    print('Training accuracies', training_accs)
    print('Validation accuracies', val_accs)
    print('Testing accuracies', test_accs)

    tr_med = statistics.median(training_accs)
    val_med = statistics.median(val_accs)
    test_med = statistics.median(test_accs)
    tr_mean = statistics.mean(training_accs)
    val_mean = statistics.mean(val_accs)
    test_mean = statistics.mean(test_accs)

    print('Training median accuracy', tr_med)
    print('Validation median accuracy', val_med)
    print('Testing median accuracy', test_med)
    print('Training mean accuracy', tr_mean)
    print('Validation mean accuracy', val_mean)
    print('Testing mean accuracy', test_mean)
コード例 #49
ファイル: call_SeisCL3D.py プロジェクト: gfabieno/SeisCL_100
#________________Launch simulation______________
model['vp'][20:40,10:20,5:10]= 3550
model['taup'][20:40,10:20,5:10]= 0.03
h5mat.savemat(filenames['csts'], csts , appendmat=False, format='7.3', store_python_metadata=True, truncate_existing=True)
h5mat.savemat(filenames['model'], model , appendmat=False, format='7.3', store_python_metadata=True, truncate_existing=True)

filepath=os.getcwd()
cmdlaunch='cd ../src/; mpirun -np 1 ./SeisCL_MPI '+filepath+'/SeisCL > ../tests/out 2>../tests/err'
print(cmdlaunch)
pipes = subprocess.Popen(cmdlaunch,stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True)
while (pipes.poll() is None):
    time.sleep(1)
sys.stdout.write('Forward calculation completed \n')
sys.stdout.flush()

dout = h5mat.loadmat(filenames['dout'])
din={}
din['src_pos']=dout['src_pos']
din['rec_pos']=dout['rec_pos']
din['vx0']=dout['vxout']
din['vy0']=dout['vyout']
din['vz0']=dout['vzout']
h5mat.savemat(filenames['din'], din , appendmat=False, format='7.3', store_python_metadata=True, truncate_existing=True)


##________________Calculate gradient______________
#model['vp'][20:40,10:20,5:10]= 3500
#model['taup'][20:40,10:20,5:10]= 0.02
#csts['gradout']=1
#csts['resout']=1
#csts['gradfreqs']=np.append(csts['gradfreqs'], csts['f0'])
コード例 #50
              store_python_metadata=True,
              truncate_existing=True)

filepath = os.getcwd()
cmdlaunch = 'cd ../src/; mpirun -np 1 ./SeisCL_MPI ' + filepath + '/SeisCL > ../tests/out 2>../tests/err'
print(cmdlaunch)
pipes = subprocess.Popen(cmdlaunch,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         shell=True)
while (pipes.poll() is None):
    time.sleep(1)
sys.stdout.write('Forward calculation completed \n')
sys.stdout.flush()

dout = h5mat.loadmat(filenames['dout'])
din = {}
din['src_pos'] = dout['src_pos']
din['rec_pos'] = dout['rec_pos']
din['vx'] = np.transpose(dout['vxout'])
din['vz'] = np.transpose(dout['vzout'])
#din['p']=np.transpose(dout['pout'])
h5mat.savemat(filenames['din'],
              din,
              appendmat=False,
              format='7.3',
              store_python_metadata=True,
              truncate_existing=True)

#________________Calculate gradient______________
csts['MOVOUT'] = 0
コード例 #51
def channel_Mat2Array(path):
    channel = hdf5storage.loadmat(path)
    return channel['result'][0, 0]['results'][0, 0]['pixel'][0, 0]
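A brief note on the chained [0, 0] indexing above: MATLAB structs come back from hdf5storage.loadmat as 1x1 structured/object arrays, so each nested field has to be unwrapped before the next lookup. A step-by-step sketch (the file path is hypothetical):

import hdf5storage

mat = hdf5storage.loadmat('channel.mat')   # hypothetical file
result = mat['result'][0, 0]               # 1x1 struct array -> single element
results = result['results'][0, 0]          # nested struct, unwrapped again
pixel = results['pixel'][0, 0]             # innermost field, one more wrapper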
コード例 #52
tf.reset_default_graph() # debugging, clear all tf variables
#tf.enable_eager_execution() # placeholders are not compatible

import model_convNet
import scipy.io


_FLOATX = tf.float32 



## LOAD DATA
mainFile = '/Users/seshads1/Documents/code/ACLEW/cnn_wavenet_like/1/'
#mainFile = '/l/seshads1/code/syllCount/syll_cout/'
loadFile = mainFile + 'pyDat_test.mat'
loaddata = hdf5storage.loadmat(loadFile)
x_test = loaddata['x_test'] # test features
t_test = loaddata['t_test'] # test target labels


## PARAMETERS
residual_channels = 256
filter_width = 5
dilations = [1, 1, 1, 1, 1, 1, 1]
input_channels = x_test[0][0].shape[2]
no_classes = 3
postnet_channels= 256

S = model_convNet.CNET(name='S', 
                       input_channels=input_channels,
                       output_channels = no_classes,
コード例 #53
import pandas as pd
import numpy as np

import sys
import scipy
from scipy import stats
import hdf5storage
options = hdf5storage.Options(oned_as = 'column', matlab_compatible = True, action_for_matlab_incompatible = 'error')

input_list=["wholebrain_t1t2","wholebrain_dbm"] #MODIFY to match the filenames of the wholebrain files created in your sample_extract*py scripts. should be name of the file without the .mat extension

z_dict = {}

for file in input_list:
    fname = file + ".mat"
    res = hdf5storage.loadmat(fname) #load raw data
    x_z = np.asarray(stats.zscore(res['X'],axis=None)) #zscore, across both subjects and vertices
    #x_z_s = x_z - np.min(x_z) #shift so all positive
    z_dict[file] = x_z


#concatenate each zscored shifted matrix together
#forms vertex X subject*n_metrics matrix
file=input_list[0]
#print(file)
wb_z_all = z_dict[file]
for file in input_list[1:]:
    print(file)
    wb_z_all = np.concatenate((wb_z_all, z_dict[file]),axis=1)

wb_z_s_all = wb_z_all - np.min(wb_z_all)
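
One detail worth calling out in the snippet above: stats.zscore(..., axis=None) standardizes over the entire matrix (all subjects and vertices at once), not per column. A quick throwaway check, reusing the numpy and scipy imports already present:

demo = np.array([[1.0, 2.0], [3.0, 4.0]])   # illustrative array, not project data
print(stats.zscore(demo, axis=None))        # mean/std taken over all four entries
print(stats.zscore(demo, axis=0))           # mean/std taken per column instead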
コード例 #54
# itgidx
filepath1 = 'itgidx.xlsx'
itgidx = np.array(pd.read_excel(filepath1, header=None))

# signal
loadlist = list(['F:\\Miniscope imaging data\\Analysis\\201808\\GPF201808_#1.4_CtxA\\GPF201808_#1.4_CtxA_day1_SignalMatrix.mat', \
                'F:\\Miniscope imaging data\\Analysis\\201808\\GPF201808_#1.4_CtxA\\GPF201808_#1.4_CtxA_day2_SignalMatrix.mat', \
                'F:\\Miniscope imaging data\\Analysis\\201808\\GPF201808_#1.4_CtxA\\GPF201808_#1.4_CtxA_day3_SignalMatrix.mat', \
                'F:\\Miniscope imaging data\\Analysis\\201808\\GPF201808_#1.4_CtxA\\GPF201808_#1.4_CtxA_day4_SignalMatrix.mat'])
# to be revised later

msPeak_signal = np.zeros((len(loadlist), 185, 8330))  # to be revised later
signal_matrix = np.zeros((itgidx.shape[0], itgidx.shape[1], 8330))

for filename in np.arange(len(loadlist)):
    mat_tmp = hdf5storage.loadmat(loadlist[filename])
    mat_signal_tmp = mat_tmp['msPeak_signal']

    for neuron in np.arange(mat_signal_tmp.shape[0]):
        itg_index = list(itgidx[:, filename]).index(neuron +
                                                    1)  # matlab to python
        signal_matrix[itg_index, filename,
                      0:mat_signal_tmp.shape[1]] = mat_signal_tmp[neuron, :]
        if not (filename == 0) and itg_index == 0:
            print(filename, neuron, itg_index)

ans = np.sum(signal_matrix, axis=2)

# In[] freezing index load for day3
syn = 14  # miniscope frame at notebook frame 1
tr = 1.3  # time range
コード例 #55
# In[255]:


fpo = getpath('ORACLES')


# In[256]:


fpo


# In[263]:


model = hs.loadmat(fpo+'model/v3_ORACLES_lut.mat')


# In[259]:


model.keys()


# In[267]:


1.0/np.cos(model['sza']*np.pi/180.0)


# In[270]:
コード例 #56
# In[4]:


fp =getpath('ORACLES')#'C:/Userds/sleblan2/Research/ORACLES/'
fp


# # Load files

# ## Load the 2016 data

# In[5]:


ar6 = hs.loadmat(fp+'/aod_ict/v8/R3/all_aod_ict_R3_2016.mat')


# In[410]:


ar6['flac'] = (ar6['qual_flag']==0)&(ar6['flag_acaod']==1)
ar6['flacr'] = (ar6['qual_flag']==0)&(ar6['flag_acaod']==1)&(ar6['fl_routine'])
ar6['flaco'] = (ar6['qual_flag']==0)&(ar6['flag_acaod']==1)&~(ar6['fl_routine'])


# In[81]:


ar6['flr'] = (ar6['qual_flag']==0) & (ar6['fl_routine'])
ar6['flo'] = (ar6['qual_flag']==0) & ~(ar6['fl_routine'])
コード例 #57
import numpy as np
from hdf5storage import loadmat, savemat
from os.path import join, isfile
from scipy.ndimage.morphology import distance_transform_edt as distance_transform

DS_PATH = 'D:/datasets/processed/voc2012' 
ds_info = loadmat(join(DS_PATH, 'dataset_info.mat'))

if not isfile('incorr_dist_hist.mat'):
	import sys
	imset = sys.argv[1].lower().capitalize()

	PRED_PATH = join(DS_PATH, 'Deeplab_Prediction', imset)
	GT_PATH = join(DS_PATH, 'Truth', imset)

	num_img = ds_info['num_'+imset.lower()]
	num_classes = 20
	bg_class = 0

	LOGIT_FMT = imset.lower()+'_%06d_logits.mat'
	LOGIT_MAT_NAME = 'logits_img'

	GT_FMT = imset.lower()+'_%06d_pixeltruth.mat'
	GT_MAT_NAME = 'truth_img'

	res = .5
	nb = int(50./res)
	dist_hist = np.zeros((nb), dtype=np.uint64)

	for idx in range(1, num_img+1):
		logit_im = loadmat(join(PRED_PATH, LOGIT_FMT % idx))[LOGIT_MAT_NAME][...,1:].reshape(-1, num_classes)
コード例 #58
ファイル: main_dpir_deblur.py プロジェクト: wuzhan11/DPIR
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------

    noise_level_img = 7.65 / 255.0  # default: 0, noise level for LR image
    noise_level_model = noise_level_img  # noise level of model, default 0
    model_name = 'drunet_gray'  # 'drunet_gray' | 'drunet_color' | 'ircnn_gray' | 'ircnn_color'
    testset_name = 'Set3C'  # test set,  'set5' | 'srbsd68'
    x8 = True  # default: False, x8 to boost performance
    iter_num = 8  # number of iterations
    modelSigma1 = 49
    modelSigma2 = noise_level_model * 255.

    show_img = False  # default: False
    save_L = True  # save LR image
    save_E = True  # save estimated image
    save_LEH = False  # save zoomed LR, E and H images
    border = 0

    # --------------------------------
    # load kernel
    # --------------------------------

    kernels = hdf5storage.loadmat(os.path.join('kernels',
                                               'Levin09.mat'))['kernels']

    sf = 1
    task_current = 'deblur'  # 'deblur' for deblurring
    n_channels = 3 if 'color' in model_name else 1  # fixed
    model_zoo = 'model_zoo'  # fixed
    testsets = 'testsets'  # fixed
    results = 'results'  # fixed
    result_name = testset_name + '_' + task_current + '_' + model_name
    model_path = os.path.join(model_zoo, model_name + '.pth')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.cuda.empty_cache()

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------

    L_path = os.path.join(testsets,
                          testset_name)  # L_path, for Low-quality images
    E_path = os.path.join(results, result_name)  # E_path, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path,
                                                   logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    # ----------------------------------------
    # load model
    # ----------------------------------------

    if 'drunet' in model_name:
        from models.network_unet import UNetRes as net
        model = net(in_nc=n_channels + 1,
                    out_nc=n_channels,
                    nc=[64, 128, 256, 512],
                    nb=4,
                    act_mode='R',
                    downsample_mode="strideconv",
                    upsample_mode="convtranspose")
        model.load_state_dict(torch.load(model_path), strict=True)
        model.eval()
        for _, v in model.named_parameters():
            v.requires_grad = False
        model = model.to(device)
    elif 'ircnn' in model_name:
        from models.network_dncnn import IRCNN as net
        model = net(in_nc=n_channels, out_nc=n_channels, nc=64)
        model25 = torch.load(model_path)
        former_idx = 0

    logger.info('model_name:{}, image sigma:{:.3f}, model sigma:{:.3f}'.format(
        model_name, noise_level_img, noise_level_model))
    logger.info('Model path: {:s}'.format(model_path))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    test_results_ave = OrderedDict()
    test_results_ave['psnr'] = []  # record average PSNR for each kernel

    for k_index in range(kernels.shape[1]):

        logger.info('-------k:{:>2d} ---------'.format(k_index))
        test_results = OrderedDict()
        test_results['psnr'] = []
        k = kernels[0, k_index].astype(np.float64)
        util.imshow(k) if show_img else None

        for idx, img in enumerate(L_paths):

            # --------------------------------
            # (1) get img_L
            # --------------------------------

            img_name, ext = os.path.splitext(os.path.basename(img))
            img_H = util.imread_uint(img, n_channels=n_channels)
            img_H = util.modcrop(img_H, 8)  # modcrop

            img_L = ndimage.filters.convolve(img_H,
                                             np.expand_dims(k, axis=2),
                                             mode='wrap')
            util.imshow(img_L) if show_img else None
            img_L = util.uint2single(img_L)

            np.random.seed(seed=0)  # for reproducibility
            img_L += np.random.normal(0, noise_level_img,
                                      img_L.shape)  # add AWGN

            # --------------------------------
            # (2) get rhos and sigmas
            # --------------------------------

            rhos, sigmas = pnp.get_rho_sigma(sigma=max(0.255 / 255.,
                                                       noise_level_model),
                                             iter_num=iter_num,
                                             modelSigma1=modelSigma1,
                                             modelSigma2=modelSigma2,
                                             w=1.0)
            rhos, sigmas = torch.tensor(rhos).to(device), torch.tensor(
                sigmas).to(device)

            # --------------------------------
            # (3) initialize x, and pre-calculation
            # --------------------------------

            x = util.single2tensor4(img_L).to(device)

            img_L_tensor, k_tensor = util.single2tensor4(
                img_L), util.single2tensor4(np.expand_dims(k, 2))
            [k_tensor, img_L_tensor] = util.todevice([k_tensor, img_L_tensor],
                                                     device)
            FB, FBC, F2B, FBFy = sr.pre_calculate(img_L_tensor, k_tensor, sf)

            # --------------------------------
            # (4) main iterations
            # --------------------------------

            for i in range(iter_num):

                # --------------------------------
                # step 1, FFT
                # --------------------------------

                tau = rhos[i].float().repeat(1, 1, 1, 1)
                x = sr.data_solution(x, FB, FBC, F2B, FBFy, tau, sf)

                if 'ircnn' in model_name:
                    current_idx = int(
                        np.ceil(sigmas[i].cpu().numpy() * 255. / 2.) - 1)

                    if current_idx != former_idx:
                        model.load_state_dict(model25[str(current_idx)],
                                              strict=True)
                        model.eval()
                        for _, v in model.named_parameters():
                            v.requires_grad = False
                        model = model.to(device)
                    former_idx = current_idx

                # --------------------------------
                # step 2, denoiser
                # --------------------------------

                if x8:
                    x = util.augment_img_tensor4(x, i % 8)

                if 'drunet' in model_name:
                    x = torch.cat((x, sigmas[i].float().repeat(
                        1, 1, x.shape[2], x.shape[3])),
                                  dim=1)
                    x = utils_model.test_mode(model,
                                              x,
                                              mode=2,
                                              refield=32,
                                              min_size=256,
                                              modulo=16)
                elif 'ircnn' in model_name:
                    x = model(x)

                if x8:
                    if i % 8 == 3 or i % 8 == 5:
                        x = util.augment_img_tensor4(x, 8 - i % 8)
                    else:
                        x = util.augment_img_tensor4(x, i % 8)

            # --------------------------------
            # (3) img_E
            # --------------------------------

            img_E = util.tensor2uint(x)
            if n_channels == 1:
                img_H = img_H.squeeze()

            if save_E:
                util.imsave(
                    img_E,
                    os.path.join(
                        E_path, img_name + '_k' + str(k_index) + '_' +
                        model_name + '.png'))

            # --------------------------------
            # (4) img_LEH
            # --------------------------------

            if save_LEH:
                img_L = util.single2uint(img_L)
                k_v = k / np.max(k) * 1.0
                k_v = util.single2uint(np.tile(k_v[..., np.newaxis],
                                               [1, 1, 3]))
                k_v = cv2.resize(k_v, (3 * k_v.shape[1], 3 * k_v.shape[0]),
                                 interpolation=cv2.INTER_NEAREST)
                img_I = cv2.resize(img_L,
                                   (sf * img_L.shape[1], sf * img_L.shape[0]),
                                   interpolation=cv2.INTER_NEAREST)
                img_I[:k_v.shape[0], -k_v.shape[1]:, :] = k_v
                img_I[:img_L.shape[0], :img_L.shape[1], :] = img_L
                util.imshow(np.concatenate([img_I, img_E, img_H], axis=1),
                            title='LR / Recovered / Ground-truth'
                            ) if show_img else None
                util.imsave(
                    np.concatenate([img_I, img_E, img_H], axis=1),
                    os.path.join(E_path,
                                 img_name + '_k' + str(k_index) + '_LEH.png'))

            if save_L:
                util.imsave(
                    util.single2uint(img_L),
                    os.path.join(E_path,
                                 img_name + '_k' + str(k_index) + '_LR.png'))

            psnr = util.calculate_psnr(
                img_E, img_H, border=border)  # change with your own border
            test_results['psnr'].append(psnr)
            logger.info('{:->4d}--> {:>10s} --k:{:>2d} PSNR: {:.2f}dB'.format(
                idx + 1, img_name + ext, k_index, psnr))

        # --------------------------------
        # Average PSNR
        # --------------------------------

        ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
        logger.info(
            '------> Average PSNR of ({}), kernel: ({}) sigma: ({:.2f}): {:.2f} dB'
            .format(testset_name, k_index, noise_level_model, ave_psnr))
        test_results_ave['psnr'].append(ave_psnr)
コード例 #59
s = sio.idl.readsav(fp+'model/sp_v1_20130219_4STAR.out')
print s.keys()
print 'sp', s.sp.shape
print 'sp (wp,   wvl,  z,  re,  ta)'


# ### Compare to lut from NAAMES

# In[9]:

fp_lut_mat = 'C:\\Users\\sleblan2\\Research\\NAAMES\\lut\\v2_NAAMES_lut.mat'
print('Loading the lut file:{}'.format(fp_lut_mat))
if not os.path.isfile(fp_lut_mat):
    print('File {} does not exist'.format(fp_lut_mat))
    raise IOError('LUT File not found: {}'.format(fp_lut_mat))
luts = hs.loadmat(fp_lut_mat)


# In[22]:

luts.keys()


# In[23]:

luts['irr_up'].shape


# In[27]:

luts['rad'].shape, s.sp.shape
コード例 #60
# In[4]:

dds = ['20151104','20151109','20151112','20151114','20151117','20151118','20151123']


# In[5]:

rts = []
sps = []


# In[6]:

for daystr in dds:
    print daystr
    rt = hs.loadmat(fp+'{}_zen_cld_retrieved.mat'.format(daystr))
    s = sio.loadmat(fp+'{}starzen.mat'.format(daystr))
    sp = Sp.Sp(s)
    rts.append(rt)
    sps.append(sp)


# # Start plotting the results

# In[7]:

rt.keys()


# In[8]: