def read_mat_files(feature_fname, info_fname):
    """docstring for read_mat_file"""
    
    print "reading features"
    tic = time.time()
    #f = h5py.File(feature_fname, 'r')
    f = io.loadmat(feature_fname)
    ff = f["feats"]
    features = sp.array(ff)
    print "time taken :", time.time() - tic, 'seconds'

    print "reading actions"
    tic = time.time()
    actions = io.loadmat(info_fname)['act']
    actions = sp.array([str(actions[i][0][0]) for i in xrange(actions.shape[0])])
    print "time taken :", time.time() - tic, 'seconds'

    print "reading names"
    tic = time.time()
    names = io.loadmat(info_fname)['name']
    names = sp.array([str(names[i][0][0]) for i in xrange(names.shape[0])])
    print "time taken :", time.time() - tic, 'seconds'
    
    return features, actions, names
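
The commented-out h5py.File call above suggests the feature file may be saved in the MATLAB v7.3 (HDF5) format, which scipy.io.loadmat cannot read (it raises NotImplementedError). A minimal sketch of a loader with an h5py fallback, assuming the array of interest is stored under the key "feats" as in the example:

import h5py
import numpy as np
from scipy import io

def load_feature_matrix(feature_fname, key="feats"):
    """Sketch: load a feature matrix, falling back to h5py for v7.3 .mat files."""
    try:
        return np.asarray(io.loadmat(feature_fname)[key])
    except NotImplementedError:
        # scipy.io.loadmat cannot read HDF5-based (v7.3) .mat files
        with h5py.File(feature_fname, "r") as f:
            # h5py returns MATLAB arrays transposed relative to loadmat
            return np.asarray(f[key]).T
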
Example #2
def load_W_and_P(data_dir=LINEAR_MODEL_DIRECTORY):
    """Load weight and p-value matrices."""

    # Load files
    D_W_ipsi = sio.loadmat(data_dir + '/W_ipsi.mat')
    D_W_contra = sio.loadmat(data_dir + '/W_contra.mat')
    D_PValue_ipsi = sio.loadmat(data_dir + '/PValue_ipsi.mat')
    D_PValue_contra = sio.loadmat(data_dir + '/PValue_contra.mat')

    # Make weight matrix for each side, then concatenate them
    W_L = np.concatenate([D_W_ipsi['data'], D_W_contra['data']], 1)
    W_R = np.concatenate([D_W_contra['data'], D_W_ipsi['data']], 1)
    W = np.concatenate([W_L, W_R], 0)
    # Make p_value matrix in the same manner
    P_L = np.concatenate([D_PValue_ipsi['data'], D_PValue_contra['data']], 1)
    P_R = np.concatenate([D_PValue_contra['data'], D_PValue_ipsi['data']], 1)
    P = np.concatenate([P_L, P_R], 0)

    col_labels = D_W_ipsi['col_labels']
    # Add ipsi & contra to col_labels
    col_labels_L = [label.split(' ')[0] + '_L' for label in col_labels]
    col_labels_R = [label.split(' ')[0] + '_R' for label in col_labels]
    labels = col_labels_L + col_labels_R

    return W, P, labels
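
The four np.concatenate calls above assemble a 2x2 block matrix with ipsi and contra data on alternating blocks. For reference, np.block expresses the same layout in a single call (a sketch, not the author's code; the directory name here is a hypothetical stand-in for LINEAR_MODEL_DIRECTORY):

import numpy as np
import scipy.io as sio

data_dir = 'linear_model'  # hypothetical path standing in for LINEAR_MODEL_DIRECTORY
ipsi = sio.loadmat(data_dir + '/W_ipsi.mat')['data']
contra = sio.loadmat(data_dir + '/W_contra.mat')['data']
# One-call equivalent of the four concatenations used for W (and likewise for P)
W = np.block([[ipsi, contra],
              [contra, ipsi]])
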
Example #3
def getData(m,dts,ch,rank=0):
    ''' Simple function to grab data.
    Returns a zero-mean, normalized data sample.
    '''
    
#    import matplotlib.pyplot as plt
    if dts == 'plmr':
        D = spio.loadmat('../data/plmr.mat')
        cix = 1000 + rank*m # np.random.random_integers(0,upb,1)
        slz = slice(cix,cix+m)
        y = D['fs'][slz].astype('complex128').flatten()
        y = y - np.mean(y)
        y = [y]
        
    if dts == 'mpk':
        nbr = (np.floor(rank/2) + 900).astype('int64')
        D = spio.loadmat('../data/lcd' + repr(nbr) + '.mat')
        
        y = list()
        for ix in xrange(ch):
            yl = D['alldat'][0][ix][:m].astype('complex128').flatten()
            yl = yl-np.mean(yl)
            y.append(yl)
    
    print 'number of channels in y: ' + repr(len(y))
    print [yl.shape for yl in y]
    # assumed from the docstring: the normalized sample list is returned to the caller
    return y
def keypoint_detection():
    try:
        data = sio.loadmat('data.mat')
    except:
        load.csv()
        data = sio.loadmat('data.mat')

    train_x = data['train_x']
    train_y = data['train_y']
    test_x = data['test_x']

    # data normalization
    train_x = train_x / 256.0
    train_y = (train_y - 48) / 48.0
    test_x = test_x / 256.0

    # shuffle returns new arrays; the result must be captured for it to take effect
    train_x, train_y = sklearn.utils.shuffle(train_x, train_y, random_state=0)

    train_x, valid_x = train_x[:-400], train_x[-400:]
    train_y, valid_y = train_y[:-400], train_y[-400:]

    model = Model(0.01, 0.9, 0.0005, 100, 10000)

    model.add_layer(layers.FullConnectedLayer(9216, 256, 1, layers.rectify))
    model.add_layer(layers.DropoutLayer(0.5))
    model.add_layer(layers.FullConnectedLayer(256, 100, 1, layers.rectify))
    model.add_layer(layers.DropoutLayer(0.5))
    model.add_layer(layers.FullConnectedLayer(100, 30))
    model.set_loss_function(layers.EuclideanLoss)

    model.build()
    print 'build model complete'
    model.train_model(train_x, train_y, valid_x, valid_y)
    model.save_test_result(test_x)
Example #5
def load_matlab_matrix( matfile, matname=None ):
    """
    Wraps scipy.io.loadmat.

    If matname provided, returns np.ndarray representing the index
    map. Otherwise, the full dict provided by loadmat is returns.
    """
    if not matname:
        out = spio.loadmat( matfile )
        mat = _extract_mat( out )
        # if mat is a sparse matrix, convert it to numpy matrix
        try:
            mat = np.matrix( mat.toarray() )
        except AttributeError:
            mat = np.matrix( mat )
        return mat
    else:
        matdict = spio.loadmat( matfile )
        mat = matdict[ matname ]
        # if mat is a sparse matrix, convert it to numpy matrix
        try:
            mat = np.matrix( mat.toarray() )
        except AttributeError:
            mat = np.matrix( mat )
        return mat #np.matrix( mat[ matname ] )
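
The try/except above handles the case where loadmat hands back a SciPy sparse matrix: MATLAB sparse arrays are returned as scipy.sparse matrices, not ndarrays. A hypothetical round trip (the file name 'sparse_demo.mat' is made up for illustration):

import numpy as np
from scipy import io, sparse

# savemat writes a SciPy sparse matrix as a MATLAB sparse array, and loadmat
# returns it as a scipy.sparse matrix rather than an ndarray.
io.savemat('sparse_demo.mat', {'S': sparse.eye(4, format='csr')})
S = io.loadmat('sparse_demo.mat')['S']
dense = np.matrix(S.toarray())   # same conversion as load_matlab_matrix above
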
def predict(filein_name):
    """预测

    """
    filein_name = '0908-12.txt'
    # get models
    from sklearn.externals import joblib
    LR010 = joblib.load('0903_uid_ave_010.pkl') 
    LR001 = joblib.load('0903_uid_ave_001.pkl') 
    LR100 = joblib.load('0903_uid_ave_100.pkl') 

    import scipy.io as sio

    X = sio.loadmat('uid_dict_X001-12.mat')['X']
    y_predict_prob = LR001.predict_proba(X)
    print(y_predict_prob.shape)
    sio.savemat(filein_name[:-4] + 'y001.mat', {'y':y_predict_prob})

    X = sio.loadmat('uid_dict_X010-12.mat')['X']
    y_predict_prob = LR010.predict_proba(X)
    print(y_predict_prob.shape)
    sio.savemat(filein_name[:-4] + 'y010.mat', {'y':y_predict_prob})

    X = sio.loadmat('uid_dict_X100-12.mat')['X']
    y_predict_prob = LR100.predict_proba(X)
    print(y_predict_prob.shape)
    sio.savemat(filein_name[:-4] + 'y100.mat', {'y':y_predict_prob})
Example #7
    def __init__(self, complete_path):

        if complete_path.endswith('.mat.gz'):
            temp_filename = complete_path.split('.gz')[0]
            with open(temp_filename, "wb") as tmp:
                shutil.copyfileobj(gzip.open(complete_path), tmp)
            dict_mr = sio.loadmat(temp_filename)
            os.remove(temp_filename)
        elif complete_path.endswith('.mat'):
            dict_mr = sio.loadmat(complete_path)
        else:
            print('Unknown file extension for MountainRange file. Should be ' +
                  '.mat or .mat.gz')
        self.value = dict_mr['value']
        self.trigger_stamp = dict_mr['triggerStamp']
        self.SC_numb = np.int(np.squeeze(dict_mr['superCycleNb']))
        self.first_trigger_t_stamp_unix = dict_mr['first_trigger_t_stamp_unix']
        self.sample_interval = float(np.squeeze(dict_mr['sampleInterval']))
        self.first_sample_time = dict_mr['firstSampleTime']
        self.sensitivity = dict_mr['sensitivity']
        self.offset = dict_mr['offset']
        self.SPSuser = dict_mr['SPSuser']
        self.t_stamp_unix = dict_mr['t_stamp_unix']

        self.time_axis = np.float_(range(self.value.shape[1]))*self.sample_interval-self.value.shape[1]*self.sample_interval/2.
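
Since sio.loadmat also accepts an open file-like object, the gzipped case above can be handled without writing a temporary file. A sketch (not the author's code) that decompresses into memory instead:

import gzip
import io as std_io

import scipy.io as sio

def load_mat_maybe_gz(complete_path):
    """Sketch: load .mat or .mat.gz without a temporary file on disk."""
    if complete_path.endswith('.mat.gz'):
        with gzip.open(complete_path, 'rb') as gz:
            # loadmat accepts any file-like object; decompress into memory first
            return sio.loadmat(std_io.BytesIO(gz.read()))
    return sio.loadmat(complete_path)
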
Example #8
def tld_tracker(vid_proto, det):
    script = os.path.join(os.path.dirname(__file__),
        '../../External/tld_matlab/tld_track.m')
    bbox = det['bbox']
    frame_id = det['frame']
    fw_frames = frame_path_after(vid_proto, frame_id)
    bw_frames = frame_path_before(vid_proto, frame_id)[::-1]
    fw_out = temp_file(suffix='.mat')
    bw_out = temp_file(suffix='.mat')
    matlab_command(script, [bbox,] + fw_frames, fw_out)
    matlab_command(script, [bbox,] + bw_frames, bw_out)
    try:
        fw_trk = loadmat(fw_out)['bbox']
    except:
        logging.error("Forward tracking failed.")
        fw_trk = [bbox+[1.]]+[[float('nan')]*5]*(len(fw_frames)-1)

    try:
        bw_trk = loadmat(bw_out)['bbox']
    except:
        logging.error("Backward tracking failed.")
        bw_trk = [[float('nan')]*5]*(len(bw_frames)-1) + [bbox+[1.]]

    os.remove(fw_out)
    os.remove(bw_out)
    bw_trk = bw_trk[::-1]
    if len(fw_trk) > 1:
        trk = np.concatenate((bw_trk, fw_trk[1:]))
    else:
        trk = bw_trk
    tracks_proto = tracks_proto_from_boxes(trk, vid_proto['video'])
    return tracks_proto
def contrast_session(session, C_path = os.getcwd()): # takes an integer for session ID, and a path.
    """-\nsession ID, and data path required.\nreturns list of ST_tbc matrices for every image in session."""
    
    session_data = sio.loadmat(os.path.join(C_path, 'goodCh_cont'))['goodCh_cont']

    sName = session_data[session,0][0]
    one_session = sio.loadmat(os.path.join(C_path, sName))
    
    trials = one_session['MUA'] #trials.shape =>  (32 channel, ~900 trial)
    img = np.squeeze(one_session['Cond'])
    img_trialNum = Counter(img)
    
    ST = []
    for p in range(len(img_trialNum)):
        st = np.zeros((img_trialNum[p+1],4500,trials.shape[0])) # ST_tbc
        
        for channel in range(trials.shape[0]):
            img_trials = trials[channel, img == p+1] # list of trials here.
            
            for trial in range(len(img_trials)): # we need the index so we can place the spike
                for ap in range(img_trials[trial].shape[1]):
                    b = int(np.ceil(img_trials[trial][0][ap]*1000)) # change to ms!
                    st[trial, b, channel] = 1   # b stands for bin; 'bin' is a builtin name in Python.
        ST.append(st)
    print sName
    return ST
Example #10
def excute_test(pairlist="../data/pairlist_lfw.mat", test_data="../data/lbp_lfw.mat", result_fold="../result/"):
    with open(result_fold+"A.pkl", "rb") as f:
        A = pickle.load(f)
    with open(result_fold+"G.pkl", "rb") as f:
        G = pickle.load(f)

    pair_list = loadmat(pairlist)['pairlist_lfw']
    test_Intra = pair_list['IntraPersonPair'][0][0] - 1
    test_Extra = pair_list['ExtraPersonPair'][0][0] - 1


    print test_Intra, test_Intra.shape
    print test_Extra, test_Extra.shape

    data  = loadmat(test_data)['lbp_lfw']
    data  = data_pre(data)

    clt_pca = joblib.load(result_fold+"pca_model.m")
    data = clt_pca.transform(data)
    data_to_pkl(data, result_fold+"pca_lfw.pkl")

    data = read_pkl(result_fold+"pca_lfw.pkl")
    print data.shape

    dist_Intra = get_ratios(A, G, test_Intra, data)
    dist_Extra = get_ratios(A, G, test_Extra, data)

    dist_all = dist_Intra + dist_Extra
    dist_all = np.asarray(dist_all)
    label    = np.append(np.repeat(1, len(dist_Intra)), np.repeat(0, len(dist_Extra)))

    data_to_pkl({"distance": dist_all, "label": label}, result_fold+"result.pkl")
Example #11
    def _loadGEval(self):
        print('Loading densereg GT..')
        prefix = os.path.dirname(__file__) + '/../../DensePoseData/eval_data/'
        print(prefix)
        SMPL_subdiv = loadmat(prefix + 'SMPL_subdiv.mat')
        self.PDIST_transform = loadmat(prefix + 'SMPL_SUBDIV_TRANSFORM.mat')
        self.PDIST_transform = self.PDIST_transform['index'].squeeze()
        UV = np.array([
            SMPL_subdiv['U_subdiv'],
            SMPL_subdiv['V_subdiv']
        ]).squeeze()
        ClosestVertInds = np.arange(UV.shape[1])+1
        self.Part_UVs = []
        self.Part_ClosestVertInds = []
        for i in np.arange(24):
            self.Part_UVs.append(
                UV[:, SMPL_subdiv['Part_ID_subdiv'].squeeze()==(i+1)]
            )
            self.Part_ClosestVertInds.append(
                ClosestVertInds[SMPL_subdiv['Part_ID_subdiv'].squeeze()==(i+1)]
            )

        arrays = {}
        f = h5py.File( prefix + 'Pdist_matrix.mat', 'r')
        for k, v in f.items():
            arrays[k] = np.array(v)
        self.Pdist_matrix = arrays['Pdist_matrix']

        print('Loaded')
def unify_newLabel_to_existing(matfiles, LabelName, IDName):
    flab     = [] #final labels
    ftrjID   = [] #final trjID

    for matidx in range(len(matfiles)): 
        L1 = loadmat(matfiles[matidx])[LabelName][0]
        # L1 = L1+1 # class label starts from 1 instead of 0
        M1 = loadmat(matfiles[matidx])[IDName][0]

        if len(flab)>0:
            Labelnowmax = max(flab)
            L1 = L1+Labelnowmax+1
            commonidx = np.intersect1d(M1,ftrjID)  # trajectories existing in both truncations

            print('flab : {0}, new labels : {1}, common terms : {2}'.format(len(np.unique(flab)),len(np.unique(L1)),len(commonidx)))
            for i in commonidx:
                labelnew = L1[M1==i][0]
                labelnow = np.array(flab)[ftrjID==i][0]
                idx1  = np.where(L1==labelnew)[0]
                L1[idx1] = labelnow  ## keep the first appearing label

        flab[:]   = flab +list(L1)
        ftrjID[:] = ftrjID + list(M1)
    
    ftrjID, indices= np.unique(ftrjID,return_index=True)
    flab = np.array(flab)[indices] 

    return flab, ftrjID
 def get_images(self, img_name):
     stp = str(img_name)
     if img_name < 10:
         stp = '0000' + stp
     elif img_name < 100:
         stp = '000' + stp
     elif img_name < 1000:
         stp = '00' + stp
     else:
         stp = '0' + stp
     img_path = 'data/portraitFCN_data/' + stp + '.mat'
     alpha_path = 'data/images_mask/' + stp + '_mask.mat'
     if os.path.exists(img_path) and os.path.exists(alpha_path):
         imat = sio.loadmat(img_path)['img']
         amat = sio.loadmat(alpha_path)['mask']
         nimat = np.array(imat, dtype=np.float)
         namat = np.array(amat, dtype=np.int)
         org_mat = np.zeros(nimat.shape, dtype=np.int)
         h, w, _ = nimat.shape
         for i in range(h):
             for j in range(w):
                 org_mat[i][j][0] = round(nimat[i][j][2] * 255 + 122.675)
                 org_mat[i][j][1] = round(nimat[i][j][1] * 255 + 116.669)
                 org_mat[i][j][2] = round(nimat[i][j][0] * 255 + 104.008)
         return nimat, namat, org_mat
     return None, None, None
    def __init__(self, db_path='', use_extra=True):
        Dataset.__init__(self)
        print("Loading files")
        self.data_dims = [32, 32, 3]
        self.range = [0.0, 1.0]
        self.name = "svhn"
        self.train_file = os.path.join(db_path, "train_32x32.mat")
        self.extra_file = os.path.join(db_path, "extra_32x32.mat")
        self.test_file = os.path.join(db_path, "test_32x32.mat")
        if use_extra:
            self.train_file = self.extra_file

        # Load training images
        if os.path.isfile(self.train_file):
            mat = sio.loadmat(self.train_file)
            self.train_image = mat['X'].astype(np.float32)
            self.train_label = mat['y']
            self.train_image = np.clip(self.train_image / 255.0, a_min=0.0, a_max=1.0)
        else:
            print("SVHN dataset train files not found")
            exit(-1)
        self.train_batch_ptr = 0
        self.train_size = self.train_image.shape[-1]

        if os.path.isfile(self.test_file):
            mat = sio.loadmat(self.test_file)
            self.test_image = mat['X'].astype(np.float32)
            self.test_label = mat['y']
            self.test_image = np.clip(self.test_image / 255.0, a_min=0.0, a_max=1.0)
        else:
            print("SVHN dataset test files not found")
            exit(-1)
        self.test_batch_ptr = 0
        self.test_size = self.test_image.shape[-1]
        print("SVHN loaded into memory")
Example #15
def test_spm_hrf_octave():
    # Test SPM hrf against output from SPM code running in Octave
    my_path = dirname(__file__)
    hrfs_path = pjoin(my_path, 'spm_hrfs.mat')
    # mat file resulting from make_hrfs.m
    hrfs_mat = sio.loadmat(hrfs_path, squeeze_me=True)
    params = hrfs_mat['params']
    hrfs = hrfs_mat['hrfs']
    for i, pvec in enumerate(params):
        dt, ppk, upk, pdsp, udsp, rat = pvec
        t_vec = np.arange(0, 32.1, dt)
        our_hrf = spm_hrf_compat(t_vec,
                                 peak_delay=ppk,
                                 peak_disp=pdsp,
                                 under_delay=upk,
                                 under_disp=udsp,
                                 p_u_ratio=rat)
        # Normalize integral to match SPM
        assert_almost_equal(our_hrf, hrfs[i])
    # Test basis functions
    # mat file resulting from get_td_dd.m
    bases_path = pjoin(my_path, 'spm_bases.mat')
    bases_mat = sio.loadmat(bases_path, squeeze_me=True)
    dt = bases_mat['dt']
    t_vec = np.arange(0, 32 + dt, dt)
    # SPM function divides by sum of values - revert with dt
    assert_almost_equal(spmt(t_vec), bases_mat['hrf'] / dt, 4)
    assert_almost_equal(dspmt(t_vec), bases_mat['dhrf'] / dt, 4)
    assert_almost_equal(ddspmt(t_vec), bases_mat['ddhrf'] / dt, 4)
Example #16
def compute_distances(mav_fname, labellist, category_name, 
                      featurefilepath, layer = 'fc8'):
    """
    Input:
    -------
    mav_fname : path to filename that contains mean activation vector
    labellist : list of labels from ilsvrc 2012
    category_name : synset_id

    """
    
    
    mean_feature_vec = loadmat(mav_fname)[category_name]
    print '%s/%s/*.mat' %(featurefilepath, category_name)
    featurefile_list = glob.glob('%s/*.mat' %featurefilepath)

    correct_features = []
    for featurefile in featurefile_list:
        try:
            img_arr = loadmat(featurefile)
            predicted_category = labellist[img_arr['scores'].argmax()]
            if predicted_category == category_name:
                correct_features += [img_arr[layer]]
        except TypeError:
            continue

    distance_distribution = compute_channel_distances(mean_feature_vec, correct_features, category_name)
    return distance_distribution
Example #17
def loadfile_hfreud(filename, alpha, rho, n):

    filename = os.path.join(data_directory, filename)
    try:
        data = loadmat(filename)['data'].flatten()
    except:
        data = np.zeros(0)

    if data.size < n+1:
        # Run matlab to generate/populate file

        print("Calling matlab....")
        cwd = os.path.dirname(os.path.abspath(__file__))

        command  = "cd(" + "'" + cwd + "'); cd ..; "

        command += "data = load_fhfreud({:d}, {:.4f}, {:.4f}); ".format(n, alpha, rho)
        command += "data = fidistinv_hfreud_setup({:d}, {:.4f}, {:.4f}, data); ".format(n, alpha, rho)
        command += "save_fhfreud(data, {:.4f}, {:.4f}); ".format(alpha, rho)

        command += "exit"
        print(command)

        subprocess.call([matlab_binary, "-nodisplay", "-r", command])
        print("...finished")

        data = loadmat(filename)['data'].flatten()

    return data
Example #18
def test_edf_data():
    """Test reading raw edf files"""
    raw_py = read_raw_edf(edf_path, misc=range(-4, 0), stim_channel=139,
                          preload=True)

    picks = pick_types(raw_py.info, meg=False, eeg=True,
                       exclude=['EDF Annotations'])
    data_py, _ = raw_py[picks]

    print(raw_py)  # to test repr
    print(raw_py.info)  # to test Info repr

    # this .mat was generated using the EEG Lab Biosemi Reader
    raw_eeglab = io.loadmat(edf_eeglab_path)
    raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
    data_eeglab = raw_eeglab[picks]

    assert_array_almost_equal(data_py, data_eeglab, 10)

    # Make sure concatenation works
    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)

    # Test uneven sampling
    raw_py = read_raw_edf(edf_uneven_path, stim_channel=None)
    data_py, _ = raw_py[0]
    # this .mat was generated using the EEG Lab Biosemi Reader
    raw_eeglab = io.loadmat(edf_uneven_eeglab_path)
    raw_eeglab = raw_eeglab['data']
    data_eeglab = raw_eeglab[0]

    # match upsampling
    upsample = len(data_eeglab) / len(raw_py)
    data_py = np.repeat(data_py, repeats=upsample)
    assert_array_equal(data_py, data_eeglab)
Example #19
def _download_norb_small(dataset):
    """
    Download the Norb dataset
    """
    from scipy.io import loadmat
    print 'Downloading small resized norb data'

    urllib.urlretrieve('http://dl.dropbox.com/u/13294233/smallnorb/smallnorb-'
                       '5x46789x9x18x6x2x32x32-training-dat-matlab-bicubic.mat',
                       dataset + '/smallnorb_train_x.mat')
    urllib.urlretrieve('http://dl.dropbox.com/u/13294233/smallnorb/smallnorb-'
                       '5x46789x9x18x6x2x96x96-training-cat-matlab.mat',
                       dataset + '/smallnorb_train_t.mat')

    urllib.urlretrieve('http://dl.dropbox.com/u/13294233/smallnorb/smallnorb-'
                       '5x01235x9x18x6x2x32x32-testing-dat-matlab-bicubic.mat',
                       dataset + '/smallnorb_test_x.mat')
    urllib.urlretrieve('http://dl.dropbox.com/u/13294233/smallnorb/smallnorb-'
                       '5x01235x9x18x6x2x96x96-testing-cat-matlab.mat',
                       dataset + '/smallnorb_test_t.mat')

    data = loadmat(dataset + '/smallnorb_train_x.mat')['traindata']
    train_x = np.concatenate([data[:,0,:].T, data[:,0,:].T]).astype('float32')
    data = loadmat(dataset + '/smallnorb_train_t.mat')
    train_t = data['trainlabels'].flatten().astype('float32')
    train_t = np.concatenate([train_t, train_t])

    data = loadmat(dataset + '/smallnorb_test_x.mat')['testdata']
    test_x = np.concatenate([data[:,0,:].T, data[:,0,:].T]).astype('float32')
    data = loadmat(dataset + '/smallnorb_test_t.mat')
    test_t = data['testlabels'].flatten().astype('float32')
    test_t = np.concatenate([test_t, test_t])
    with open(dataset+'/norbsmall32x32.cpkl','wb') as f:  # binary mode for a binary pickle protocol
        cPkl.dump([train_x, train_t, test_x, test_t], f,
                  protocol=cPkl.HIGHEST_PROTOCOL)
def demo_lab1():

    # this part will load the dataset
    one_train=sio.loadmat('one_train.mat')
    seven_train=sio.loadmat('seven_train.mat')
    
    TRAIN_ONES=one_train['one_train']
    TRAIN_SEVENS=seven_train['seven_train']
    TRAIN=np.concatenate((TRAIN_ONES, TRAIN_SEVENS), axis=0)
    
    LABEL_ONES=np.ones((300,1))
    LABEL_SEVENS=np.ones((300,1))*-1
    LABEL=np.concatenate((LABEL_ONES, LABEL_SEVENS), axis=0)
 
    #Trivial Part
    YOURNAME = ...          # eg. 'john_smith' pay attention to the underscore
          
    #Challenging Part
    N_SPLIT =  ...                                 # eg. 5     see 'kcv'
    SPLIT_TYPE = ...                        # eg. 'Sequential' see 'kcv'
    KERNEL = ...                              # eg. 'Linear' see 'KernelMatrix'
    KERNEL_PARAMETER = ...           #fix it manually or by autosigma for example with autosigma(TRAIN,5). see 'KernelMatrix' 'kcv' and 'autosigma'
    TRANGE =  ...                      # eg. np.logspace(-3, 3, 7) or np.linspace(0.1, 10, 10)

    t_kcv_idx, avg_err_kcv = kcv(KERNEL, KERNEL_PARAMETER, 'Reg. Least Squared', TRANGE, TRAIN, LABEL, N_SPLIT, 'Classification', SPLIT_TYPE)
    save_challenge_1(YOURNAME, TRANGE[t_kcv_idx], KERNEL, KERNEL_PARAMETER, avg_err_kcv[0][t_kcv_idx])

    return
Example #21
def read_meta_files(labels_fname, camname_fname, actname_fname, partiname_fname, viewname_fname):
    
    print "reading participant names"
    tic = time.time()
    partiNames = np.squeeze(io.loadmat(partiname_fname)['myPartis'])
    partiNames_items = np.squeeze(io.loadmat(partiname_fname+'_items')['myPartis_items'])
    print "time taken :", time.time() - tic, 'seconds'
    
    print "reading labels"
    tic = time.time()
    labels = np.squeeze(io.loadmat(labels_fname)['myLabels'])
    labels_items = np.squeeze(io.loadmat(labels_fname+'_items')['myLabels_items'])
    print "time taken :", time.time() - tic, 'seconds'
    
    print "reading camera names"
    tic = time.time()
    camNames = np.squeeze(io.loadmat(camname_fname)['myCams'])
    camNames_items = np.squeeze(io.loadmat(camname_fname+'_items')['myCams_items'])
    print "time taken :", time.time() - tic, 'seconds'

    print "reading action names"
    tic = time.time()
    actNames = np.squeeze(io.loadmat(actname_fname)['myActs'])
    actNames_items = np.squeeze(io.loadmat(actname_fname+'_items')['myActs_items'])
    print "time taken :", time.time() - tic, 'seconds'
    
    print "reading view names"
    tic = time.time()
    viewNames = np.squeeze(io.loadmat(viewname_fname)['myViews'])
    viewNames_items = np.squeeze(io.loadmat(viewname_fname+'_items')['myViews_items'])
    print "time taken :", time.time() - tic, 'seconds'
    
    

    return labels, camNames, actNames, partiNames, viewNames
Example #22
 def get_data_1(self,numero,radical,suffix='_func_data'):
     fbd=loadmat('./CEC05_files/fbias_data.mat')  # the f_bias dictionary
     fb=fbd['f_bias'][0,:]                        # the f_bias array
     fbias=fb[numero-1]                             # the desired f_bias for this function
     od=loadmat('./CEC05_files/'+radical+suffix+'.mat')    # the shift data dictionary
     o=od['o'][0,:]                                           # the shift data array
     return fbias,o
Example #23
def load_pertub_data_cifar(dirs='data_imputation/', dataset='cifar10_gcn_var', pertub_type=3, pertub_prob=6):
    # perturb data
    print 'Loading perturbed data...'

    if pertub_type==4:
        zz = sio.loadmat(dirs+dataset+'_type_'+str(pertub_type)+'_params_'+str(int(pertub_prob*100))+'_noise_rawdata.mat')
    elif pertub_type==3:
        pertub_prob = int(pertub_prob)
        zz = sio.loadmat(dirs+dataset+'_type_'+str(pertub_type)+'_params_'+str(pertub_prob)+'_noise_rawdata.mat')
    elif pertub_type==5:
        zz = sio.loadmat(dirs+dataset+'_type_'+str(pertub_type)+'_params_noise_rawdata.mat')
    else:
        print 'Error in load_pertub_data'
        print dirs, pertub_type, pertub_prob
        exit()

    data_train = zz['z_train'].T
    data = zz['z_test_original'].T
    data_perturbed = zz['z_test'].T
    pertub_label = zz['pertub_label'].astype(np.float32).T
    pertub_number = float(np.sum(1-pertub_label))

    print pertub_number, data_train.shape, data.shape, data_perturbed.shape, pertub_label.shape

    data_train = theano.shared(np.asarray(data_train, dtype=theano.config.floatX), borrow=True)
    data = theano.shared(np.asarray(data, dtype=theano.config.floatX), borrow=True)
    data_perturbed = theano.shared(np.asarray(data_perturbed, dtype=theano.config.floatX), borrow=True)
    pertub_label = theano.shared(np.asarray(pertub_label, dtype=theano.config.floatX), borrow=True)

    return data_train, data, data_perturbed, pertub_label, pertub_number
Example #24
def loadMAT(slice_filename,parameters_filename):
    '''
    Created to convert .mat files with the specific configuration used for ECoG data from Newcastle Hospitals and to create a DataObj.
    If you want to load another .mat file, use scipy.io.loadmat and create_DataObj.
    
    Parameters
    ----------
    slice_filename: str 
        Name of the slice (.mat) file 
    parameters_filename: str 
        Name of the parameters (.mat) file
    
    '''
    mat = sio.loadmat(parameters_filename, struct_as_record=False, squeeze_me=True)
    parameters = mat['parameters']
    ch_l = parameters.channels
    ch_labels = [str(x) for x in ch_l]
    sample_rate = parameters.sr
    f = sio.loadmat(slice_filename, struct_as_record=False, squeeze_me=True)
    Data = f['Data']
    time_vec = Data.time_vec
    signal = Data.raw.T
    amp_unit = '$\mu V$'
    Data = DataObj(signal,sample_rate,amp_unit,ch_labels,time_vec,[])
    return Data
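
The docstring above points at plain scipy.io.loadmat for other files; the struct_as_record=False, squeeze_me=True options are what make the attribute access (parameters.channels, Data.time_vec) work, because MATLAB structs are then returned as mat_struct objects. A minimal sketch with a hypothetical file name, reusing the 'sr' field from the example:

import scipy.io as sio

# Hypothetical file containing a MATLAB struct named 'parameters' with field 'sr'
mat = sio.loadmat('recording_parameters.mat', struct_as_record=False, squeeze_me=True)
print(mat['parameters'].sr)               # attribute access on a mat_struct

# With the default options the same struct is a structured ndarray instead:
mat_default = sio.loadmat('recording_parameters.mat')
print(mat_default['parameters']['sr'][0, 0])  # nested field/index access
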
Example #25
def load_simTB_data(source_directory):
    """
    Load simTB data along with simulation info.
    """
    nifti_files = natural_sort(glob(path.join(source_directory, "*_DATA.nii")))
    sim_files = natural_sort(glob(path.join(source_directory, "*_SIM.mat")))
    if len(nifti_files) != len(sim_files):
        raise ValueError("Different number of DATA and SIM files found int %s"
                         % source_directory)
    assert len(nifti_files) > 0

    param_files = glob(path.join(source_directory, "*PARAMS.mat"))
    if len(param_files) != 1:
        raise ValueError("Exactly one param file needed, found %d in %s"
                         % (len(param_files), source_directory))
    params = tuple(io.loadmat(param_files[0])["sP"][0][0])

    sim_dict = {}
    for i, (nifti_file, sim_file) in enumerate(zip(nifti_files, sim_files)):
        assert "%03d" % (i + 1) in nifti_file
        assert "%03d" % (i + 1) in sim_file
        sims = io.loadmat(sim_file)
        tcs = sims["TC"].T
        sms = sims["SM"]
        sim_dict[i] = {"SM": sms, "TC": tcs}
    sim_dict["params"] = params

    data, labels, base = read_niftis(nifti_files)
    return data, labels, sim_dict
Example #26
def read_dataset( stimulus_pattern='stimulus_%d.mat', data_file='data.mat'):
    from scipy.io import loadmat
    data = loadmat(data_file)
    data = data['data']    
    spikes = data['spike_rate'][0][0]
    del data['spike_rate']
    data['rgc_ids']        = data['rgc_ids'][0][0][0]    
    data['cone_weights']   = data['cone_weights'][0][0]    
    data['cone_types']     = data['cone_types'][0][0].tolist()    
    data['cone_locations'] = data['cone_locations'][0][0]    
    data['rgc_locations']  = numpy.array([d[0][0] for d in data['rgc_locations'][0][0]])    
    data['rgc_types']      = dict((d[0][0],d[1][0].tolist()) 
                                   for d in filter( lambda d : len( d[0] )>0 , [d[0][0] 
                                   for d in data['cell_types'][0][0][0]] ))
    try:
        i = 0
        N_timebins = 0
        while 1:
            data['stimulus'] = loadmat(stimulus_pattern % i)['cone_input'].T
            data['spikes'] = spikes[N_timebins:N_timebins+data['stimulus'].shape[1]]
            N_timebins += data['stimulus'].shape[1]
            i += 1
            yield data
    except:
        # no more stimulus files; end the generator (PEP 479: do not raise
        # StopIteration inside a generator)
        return
Example #27
def TenTwentyDownslopeBPF(FullPath):
    TempClipData = spio.loadmat(FullPath)
    TempDataArray = TempClipData['data']
    TempDataArray = TempDataArray.transpose()

    Fsample = float(TempClipData['freq'])       #Sampling frequency
    dt = 1.0/Fsample                            #Time between samples
    TimeValues = np.arange(0.0, 1.0, dt)        #Construct ndarray of time values
    LastChan = int(TempDataArray.shape[1])      #Last channel number
    Channels = np.arange(0, LastChan,1)         #List of channel numbers
    
    FeatureOutput = np.zeros(LastChan)          #Initialize the output 

    # Read in the digital filter coefficients and place them in ndarrays
    FilterInfo=spio.loadmat('FilterSetTenTwentyDownslopeBPF.mat')

    FilterCoefI = FilterInfo['FilterCoefI'].flatten()
    FilterCoefQ = FilterInfo['FilterCoefQ'].flatten()

    # Calculate the feature values for each channel       
    for i in Channels:
        Iproduct = FilterCoefI*TempDataArray[:,i]
        Isum=np.sum(Iproduct)   # sum over the whole product, matching Qsum below
        
        Qproduct = FilterCoefQ*TempDataArray[:,i]
        Qsum=np.sum(Qproduct)
        
        FeatureOutput[i] = np.log( np.sqrt(Isum*Isum + Qsum*Qsum) )
        FeatureOutput[i] = bender(FeatureOutput[i], 4.0, 4.0)               #Limit to range of 0 to 1.  Second arg is mean, third is span      

    FeatureList = FeatureOutput.tolist()        #Convert ndarray to list.  The returned value will be appended to other values; this would be very inefficient with an ndarray

    #Return feature vector in form of a list
    return(FeatureList)
Example #28
def main():
    predicted_mat = loadmat(args.predicted_mat)['labels']
    truth_mat = loadmat(args.truth_mat)['GT']
    mode = args.mode
    
    min_shape = np.minimum(predicted_mat.shape, truth_mat.shape)
    error = None
    if(mode == 'all'):
        error = compare_all_mats(predicted_mat, truth_mat, min_shape)
        print 1 - error/(min_shape[0]*min_shape[1]*min_shape[2])
    else:
        error = compare_single_mats(predicted_mat, truth_mat, min_shape)
        error = 1 - error/(min_shape[0]*min_shape[1])
        print error
    should_graph = args.graph
    if(should_graph == 'True'):
        y_axis = error
        x_axis = np.arange(len(y_axis))
        fig = plt.figure()
        ax = fig.add_subplot(111)
        print type(y_axis)
        #ax.plot(x_axis, y_axis)
        ax.scatter(x_axis, y_axis)
        ax.set_xlim([0,len(x_axis)])
        ax.set_ylim([0,1])
        plt.savefig(args.output)
Example #29
def save_crop_images_and_joints():
    training_indices = loadmat('data/FLIC-full/tr_plus_indices.mat')
    training_indices = training_indices['tr_plus_indices'].flatten()

    examples = loadmat('data/FLIC-full/examples.mat')
    examples = examples['examples'][0]
    joint_ids = ['lsho', 'lelb', 'lwri', 'rsho', 'relb', 'rwri', 'lhip',
                 'lkne', 'lank', 'rhip', 'rkne', 'rank', 'leye', 'reye',
                 'lear', 'rear', 'nose', 'msho', 'mhip', 'mear', 'mtorso',
                 'mluarm', 'mruarm', 'mllarm', 'mrlarm', 'mluleg', 'mruleg',
                 'mllleg', 'mrlleg']

    available = joint_ids[:8]
    available.extend(joint_ids[12:14])
    available.extend([joint_ids[16]])

    target_joints = ['lsho', 'lelb', 'lwri',
                     'leye', 'reye', 'nose',
                     'rsho', 'relb', 'rwri']

    fp_train = open('data/FLIC-full/train_joints.csv', 'w')
    fp_test = open('data/FLIC-full/test_joints.csv', 'w')
    for i, example in enumerate(examples):
        joint = example[2].T
        joint = dict(zip(joint_ids, joint))
        fname = example[3][0]
        joint = get_joint_list(joint)
        msg = '{},{}'.format(fname, ','.join([str(j) for j in joint.tolist()]))
        if i in training_indices:
            print(msg, file=fp_train)
        else:
            print(msg, file=fp_test)
Example #30
def sentCombMat_add(w1,w2,w3):
    root = u"I:/数据/word12585relation30/rel_30_ref_TFIDF/ref_800_TFIDF/rel_svd/file_word_lus/word_mat_latent_324/"
    wordList = getWordList()
    w1Mat = sio.loadmat(root+u"l_"+w1)[w1]
    w2Mat = sio.loadmat(root+u"l_"+w2)[w2]
    w3Mat = sio.loadmat(root+u"l_"+w3)[w3]
    return w1Mat+w2Mat+w3Mat
Example #31
import cmath
import numpy as np
import scipy.io as sio
from scipy.stats.mstats import mode

eeg = sio.loadmat('data/eeg')
#print (eeg.keys())      # keys are 'x_te', 'x_train', 'y_te', 'y_train'

X_train = eeg['x_train']
Y_train = eeg['y_train']
X_test = eeg['x_te']
Y_test = eeg['y_te']

N = 64
f, n = np.arange(N), np.arange(N)
bm = np.blackman(N)


# Creating DFT matrix F
def DFT(x):
    F = np.exp(-2j * cmath.pi / N * np.dot(f.reshape(len(f), 1),
                                           n.reshape(len(n), 1).T))
    return F


# Creating data matrix X
def create_X(x):
    for i in range(0, len(x), 48):
        sig = x[i:(i + 64)]
        l = len(sig)
        if l < 64:
Example #32
if __name__ == '__main__':
    path = r'C:\Users\DE\Downloads\NPC_Renew_Data_1'
    IDs = os.listdir(path)
    for id in IDs:
        idPath = path + '\\' + id
        print(idPath)
        labelFiles = os.listdir(idPath)
        print(labelFiles)
        for label in labelFiles:
            filePath = idPath + '\\' + label
            if os.path.isfile(filePath):
                (filename, extension) = os.path.splitext(label)
                pngDir = idPath + '\\' + filename
                if not os.path.exists(pngDir):
                    os.makedirs(pngDir)
                data = scio.loadmat(filePath)
                print(data.keys())
                seg = list(data.keys())[-1]
                data = data[seg]
                x, y, z = data.shape
                for i in range(0,z):
                    binaryimg = np.uint8(data[:,:,i]>0)
                    img = binaryimg*255
                    new_img = Image.fromarray(img, 'L')
                    name = pngDir + '\\' + str(i) + '.png'
                    new_img.save(name)
                    print(name)
                # print('It is a file')

        # Labels = os.listdir(r'C:\Users\DE\Downloads\NPC_Renew_Data_2' + '\\'+id)
        # print(Labels)
Example #33
    genparam['alpha'] = alpha
    genparam['delta'] = delta
    genparam['ndttrialrange'] = ndttrialrange
    genparam['deltatrialsd'] = deltatrialsd
    genparam['rt'] = rt
    genparam['acc'] = acc
    genparam['y'] = y
    genparam['participant'] = participant
    genparam['condition'] = condition
    genparam['nparts'] = nparts
    genparam['nconds'] = nconds
    genparam['ntrials'] = ntrials
    genparam['N'] = N
    sio.savemat('data/genparam_test4.mat', genparam)
else:
    genparam = sio.loadmat('data/genparam_test4.mat')
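    # Hedged aside (not part of the original script): loadmat returns every
    # variable as an array of at least two dimensions, so scalars written by
    # savemat above come back as 1x1 arrays after this reload. One way to
    # unwrap them before reuse:
    nparts = int(genparam['nparts'].squeeze())
    ntrials = int(genparam['ntrials'].squeeze())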

# Stan code

tostan = '''
functions { 
  /* Wiener diffusion log-PDF for a single response (adapted from brms 1.10.2)
   * Arguments: 
   *   Y: acc*rt in seconds (negative and positive RTs for incorrect and correct responses respectively)
   *   boundary: boundary separation parameter > 0
   *   ndt: non-decision time parameter > 0
   *   bias: initial bias parameter in [0, 1]
   *   drift: drift rate parameter
   * Returns:  
   *   a scalar to be added to the log posterior 
   */ 
Example #34
"""
Created on Wed Dec 11 18:31:17 2019

@author: Yuan Zhang
"""

import numpy as np
import scipy as sp
import scipy.io as sio
import matplotlib.pyplot as plt
import scipy.signal as sig
from sympy import diff
import scipy.stats as scs
import math
#load data
data = sio.loadmat('cw1_data2.mat')
dFonF = data['dFonF']
data1 = sio.loadmat('cw1_data1.mat')
mouse_theta = data1['mouse_theta']
fs = data1['fs'].flatten()[0]  # frequency is the same as coursework 1
row = np.size(dFonF, 0)  #75
col = np.size(dFonF, 1)  #7420
t = np.linspace(0, (col - 1) * 1 / fs, col)  #build the time series (time axis)

## question 1
yo = dFonF[3]

y = sig.savgol_filter(
    yo, 299, 6, mode='wrap'
)  #Too much noise may affect the determination of peak position.
#Because the Savitzky-Golay filter preserves the area, position and width of peaks, I choose this filter and find peak positions on the filtered curve.


laplace = {'name': 'Laplace', 'weights': tc.laplace_weights, 'params': {'scale': width}}
delta = {'name': '$\delta$', 'weights': tc.eye_weights, 'params': tc.eye_params}
gaussian = {'name': 'Gaussian', 'weights': tc.gaussian_weights, 'params': {'var': width}}
mexican_hat = {'name': 'Mexican hat', 'weights': tc.mexican_hat_weights, 'params': {'sigma': width}}

factors = 700

if factors == 100:
    pieman_name = 'pieman_ica100.mat'
else:
    pieman_name = 'pieman_data.mat'

pieman_data = loadmat(os.path.join(config['datadir'], pieman_name))
pieman_conds = ['intact', 'paragraph', 'word', 'rest']

weights_paramter = eval(wp)

if debug:
    data = []
    conds = []
    for c in pieman_conds:
        next_data = list(map(lambda i: pieman_data[c][:, i][0][:30, :10], np.arange(4)))
        data.extend(next_data)
        conds.extend([c]*len(next_data))
    del pieman_data

else:
def load_weights():
    datas = loadmat('ex3weights.mat')
    theta1 = datas['Theta1']  # 25*401
    theta2 = datas['Theta2']  # 10*26
    return theta1, theta2
def load_data():
    datas = loadmat('ex3data1.mat')
    X = datas['X']  # 5000 x 400 (flattened 20x20 images)
    y = datas['y']
    return X, y
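
The shape comments above (Theta1 is 25x401, Theta2 is 10x26, X is 5000x400) describe a one-hidden-layer network. A hedged sketch of feed-forward prediction with these matrices; the sigmoid activation, bias columns, and 1-based labels are assumptions, though standard for this exercise:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def predict(theta1, theta2, X):
    """Sketch: feed-forward prediction assuming theta1 is 25x401, theta2 is 10x26,
    X is n_samples x 400, and a bias column of ones is prepended at each layer."""
    m = X.shape[0]
    a1 = np.hstack([np.ones((m, 1)), X])    # n x 401
    a2 = sigmoid(a1 @ theta1.T)             # n x 25
    a2 = np.hstack([np.ones((m, 1)), a2])   # n x 26
    a3 = sigmoid(a2 @ theta2.T)             # n x 10
    return np.argmax(a3, axis=1) + 1        # 1-based class labels, MATLAB style

# theta1, theta2 = load_weights(); X, y = load_data()
# accuracy = np.mean(predict(theta1, theta2, X) == y.flatten())
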
Example #38
    for f in range(FOLDS):
        idx_test1 = IDX1[offset1: offset1 + fsz1]
        idx_test2 = IDX2[offset2: offset2 + fsz2]

        AUC_test1[f], AUC_test2[f] = innerfold(GROUNDTRUTH1, GROUNDTRUTH2, T1, T2, idx_test1, idx_test2, idx_test1, idx_test2, e1, e2, k1, k2, SZ1, SZ2, alpha=alpha, rank=rank)

        offset1 += fsz1
        offset2 += fsz2

    return AUC_test1, AUC_test2


if __name__ == '__main__':

    mat = loadmat('data/uml.mat')
    #mat = loadmat('data/alyawarradata.mat')
    K = np.array(mat['Rs'], np.float32)
    print('K: ', K.shape)

    # fill nan values with 0s if exist
    #K = np.nan_to_num(K)

    n, d = K.shape[0], K.shape[2]
    n1 = int(n/2)
    d1 = int(d/2)

    K1 = K[:n1, :n1, :d1]   # note: when lambda_A, lambda_R = 5, 5 => 52 x 52 x 26 tensors -> 0.95 AUCs
    K2 = K[n1:, n1:, d1:]
    print('K1: ', K1.shape)
    print('K2: ', K2.shape)
def prepare_data(gt_2d_bdb=False, patch_h=224, patch_w=224, shift=True, iou_threshold=0.1):
    """
        Generating the ground truth for end-to-end training

        Parameters
        ----------
        gt_2d_bdb : bool
            indicates whether to use the ground truth of 2D bounding boxes
        patch_h: int
            the height of target resized patch
        patch_w: int
            the width of target resized patch
        iou_threshold : float
            iou threshold for two 2D bounding boxes
    """
    bin = PATH.bins()
    data_root = op.join(PATH.metadata_root, 'sunrgbd_train_test_data')
    train_path = list()
    test_path = list()
    layout_centroid = list()
    layout_coeffs = list()
    # obj_category = dict()
    if not op.exists(data_root):
        os.mkdir(data_root)
    for i in range(10335):
        sequence = readsunrgbdframe(image_id=i+1)
        print i+1
        sequence._R_tilt = loadmat(op.join(PATH.metadata_root, 'updated_rtilt', str(i+1) + '.mat'))['r_tilt']
        # R_ex is cam to world
        sequence._R_ex = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]]).dot(sequence.R_tilt).dot(np.array([[1, 0, 0], [0, 0, 1], [0, -1, 0]]))
        K = sequence.K
        result = []
        for bdb2d in sequence.bdb2d:
            if check_bdb(bdb2d, 2*sequence.K[0, 2], 2*sequence.K[1, 2]):
                result.append(bdb2d)
            else:
                print 'ground truth not valid'
        sequence._bdb2d = result

        bdb2d_from_3d_list = []
        with open(op.join(PATH.metadata_root, '2dbdb', str(i + 1) + '.json'), 'r') as f:
            detected_bdbs = json.load(f)
        f.close()
        boxes = list()
        for bdb3d in sequence.bdb3d:
            center = bdb3d['centroid'][0]
            coeffs = bdb3d['coeffs'][0]
            basis = bdb3d['basis'].astype('float32')
            if bdb3d['classname'][0] not in OBJ_CATEGORY_CLEAN:
                continue
            bdb2d_from_3d = project_struct_bdb_to_2d(basis, coeffs, center, sequence.R_ex.T, K)
            projected_2d_center = project_3d_points_to_2d(center.reshape(1, 3), sequence.R_ex.T, K)
            if bdb2d_from_3d is None:
                print '%s not valid' % (bdb3d['classname'][0])
                continue
            bdb2d_from_3d['classname'] = bdb3d['classname'][0]
            bdb2d_from_3d_list.append(bdb2d_from_3d)
            if gt_2d_bdb is True:
                max_iou = 0
                iou_ind = -1
                for j, bdb2d in enumerate(sequence.bdb2d):
                    if bdb2d['classname'] == bdb3d['classname'][0]:
                        iou = get_iou(bdb2d_from_3d, bdb2d)
                        if iou > iou_threshold and iou > max_iou:
                            iou_ind = j
                            max_iou = iou
                if iou_ind >= 0:
                    if shift:
                        shifted_box = random_shift_2d_box(sequence.bdb2d[iou_ind])
                        boxes.append({'2dbdb': shifted_box, '3dbdb': bdb3d,
                                      'projected_2d_center': projected_2d_center})
                    else:
                        boxes.append({'2dbdb': sequence.bdb2d[iou_ind], '3dbdb': bdb3d, 'projected_2d_center': projected_2d_center})
            else:
                max_iou = 0
                iou_ind = -1
                max_bdb = dict()
                for j, bdb2d in enumerate(detected_bdbs):
                    if bdb2d['class'] == bdb3d['classname'][0]:
                        box = bdb2d['bbox']
                        box = {'x1': box[0], 'y1': box[1], 'x2': box[2], 'y2': box[3]}
                        iou = get_iou(bdb2d_from_3d, box)
                        if iou > iou_threshold and iou > max_iou:
                            iou_ind = j
                            max_iou = iou
                            box['score'] = bdb2d['score']
                            box['classname'] = bdb2d['class']
                            max_bdb = box
                if iou_ind >= 0:
                    # print max_iou, bdb2d_from_3d, detected_bdbs[iou_ind]
                    if shift:
                        shifted_box = random_shift_2d_box(max_bdb)
                        boxes.append({'2dbdb': shifted_box, '3dbdb': bdb3d, 'projected_2d_center': projected_2d_center})
                    else:
                        boxes.append({'2dbdb': max_bdb, '3dbdb': bdb3d, 'projected_2d_center': projected_2d_center})
        # print boxes
        camera = dict()
        camera_flip = dict()
        camera['yaw_cls'], camera['yaw_reg'], camera['roll_cls'], camera['roll_reg'] = camera_cls_reg(sequence.R_ex.T, bin)
        camera['K'] = sequence.K
        # flip the camera
        camera_flip['yaw_cls'], camera_flip['yaw_reg'], camera_flip['roll_cls'], camera_flip['roll_reg'] = camera_cls_reg(sequence.R_ex.T, bin, flip=True)
        camera_flip['K'] = sequence.K
        template_path = op.join(PATH.metadata_root, 'size_avg_category.pickle')
        layout_pts = loadmat(op.join(PATH.metadata_root, '3dlayout', str(i+1) + '.mat'))['manhattan_layout'].T
        l_centroid, l_basis, l_coeffs = get_bdb_from_corners(layout_pts)
        # print l_centroid
        layout_centroid.append(l_centroid)
        layout_coeffs.append(l_coeffs)
        layout = dict()
        layout['centroid_reg'] = layout_centroid_avg_residual(l_centroid, bin['layout_centroid_avg'], bin['layout_normalize'])
        layout['coeffs_reg'] = layout_size_avg_residual(l_coeffs, bin['layout_coeffs_avg'])
        layout['ori_cls'], layout['ori_reg'] = ori_cls_reg(l_basis[1, :], bin, layout=True)
        layout_flip = dict()
        layout_flip['centroid_reg'] = layout_centroid_avg_residual(l_centroid, bin['layout_centroid_avg'], bin['layout_normalize'], flip=True)
        layout_flip['coeffs_reg'] = layout_size_avg_residual(l_coeffs, bin['layout_coeffs_avg'])
        layout_flip['ori_cls'], layout_flip['ori_reg'] = ori_cls_reg(l_basis[1, :], bin, layout=True, flip=True)
        # print layout['ori_cls'], layout_flip['ori_cls']
        # clean the ground truth
        with open(template_path, 'r') as f:
            size_template = pickle.load(f)
        f.close()
        boxes_out = list()
        boxes_out_flip = list()
        for box in boxes:
            box_set = dict()
            # box_set['ori_cls'], box_set['ori_reg'] = ori_cls_reg(box['3dbdb']['orientation'])
            box_set['ori_cls'], box_set['ori_reg'] = ori_cls_reg(box['3dbdb']['basis'][1, :], bin)
            # print box['3dbdb']['basis']
            # print basis_from_ori(num_from_bins(bin['ori_bin'], box_set['ori_cls'], box_set['ori_reg']))
            box_set['size_reg'] = size_avg_residual(box['3dbdb']['coeffs'][0], size_template, box['2dbdb']['classname'])
            box_set['bdb3d'] = get_corners_of_bb3d_no_index(box['3dbdb']['basis'], box['3dbdb']['coeffs'][0], box['3dbdb']['centroid'][0])
            box_set['x_cls'], box_set['x_reg'], box_set['y_cls'], box_set['y_reg'], box_set['z_cls'], box_set['z_reg'] = centroid_cls_reg(box['3dbdb']['centroid'][0], bin)
            box_set['bdb_pos'] = [box['2dbdb']['x1'], box['2dbdb']['y1'], box['2dbdb']['x2'], box['2dbdb']['y2']]
            box_set['bdb2d'] = [box['2dbdb']['x1'] / float(K[0, 2]), box['2dbdb']['y1'] / float(K[1, 2]), box['2dbdb']['x2'] / float(K[0, 2]), box['2dbdb']['y2'] / float(K[1, 2])]
            box_set['centroid_cls'], box_set['centroid_reg'] = bin_cls_reg(bin['centroid_bin'], np.linalg.norm(box['3dbdb']['centroid'][0]))
            delta_2d = list()
            delta_2d.append(((box_set['bdb_pos'][0] + box_set['bdb_pos'][2]) / 2 - box['projected_2d_center'][0][0]) / (box_set['bdb_pos'][2] - box_set['bdb_pos'][0]))
            delta_2d.append(((box_set['bdb_pos'][1] + box_set['bdb_pos'][3]) / 2 - box['projected_2d_center'][1][0]) / (box_set['bdb_pos'][3] - box_set['bdb_pos'][1]))
            box_set['delta_2d'] = delta_2d
            box_set['size_cls'] = OBJ_CATEGORY_CLEAN.index(box['2dbdb']['classname'])
            # print box_set['size_cls']
            # print box['2dbdb']['classname']
            boxes_out.append(box_set)
            # print box_set['3dbdb']['classname'], box_set['ori_cls'], box_set['ori_reg'], box_set['size_reg'], box_set['size_cls'], box_set['size_reg']
            # flip the boxes
            box_set_flip = dict()
            # box_set_flip['ori_cls'], box_set_flip['ori_reg'] = ori_cls_reg(box['3dbdb']['orientation'], flip=True)
            box_set_flip['ori_cls'], box_set_flip['ori_reg'] = ori_cls_reg(box['3dbdb']['basis'][1, :], bin, flip=True)
            box_set_flip['size_reg'] = size_avg_residual(box['3dbdb']['coeffs'][0], size_template, box['2dbdb']['classname'])
            box_set_flip['x_cls'], box_set_flip['x_reg'], box_set_flip['y_cls'], box_set_flip['y_reg'], box_set_flip['z_cls'], box_set_flip['z_reg'] = centroid_cls_reg(box['3dbdb']['centroid'][0], bin, flip=True)
            box_set_flip['centroid_cls'], box_set_flip['centroid_reg'] = bin_cls_reg(bin['centroid_bin'], np.linalg.norm(box['3dbdb']['centroid'][0]))
            box_set_flip['bdb_pos'] = [int(2 * K[0, 2] - box['2dbdb']['x2']), box['2dbdb']['y1'], int(2 * K[0, 2] - box['2dbdb']['x1']), box['2dbdb']['y2']]
            box_set_flip['bdb2d'] = [int(2 * K[0, 2] - box['2dbdb']['x2']) / float(K[0, 2]), box['2dbdb']['y1'] / float(K[1, 2]),
                                       int(2 * K[0, 2] - box['2dbdb']['x1']) / float(K[0, 2]), box['2dbdb']['y2'] / float(K[1, 2])]
            box_set_flip['size_cls'] = OBJ_CATEGORY_CLEAN.index(box['2dbdb']['classname'])
            coeffs_flip = size_from_template(box_set_flip['size_reg'], size_template, OBJ_CATEGORY_CLEAN[box_set_flip['size_cls']])
            centroid_flip = np.array([num_from_bins(bin['x_bin'], box_set_flip['x_cls'], box_set_flip['x_reg']), num_from_bins(bin['y_bin'], box_set_flip['y_cls'], box_set_flip['y_reg']), num_from_bins(bin['z_bin'], box_set_flip['z_cls'], box_set_flip['z_reg'])])
            basis_flip = basis_from_ori(num_from_bins(bin['ori_bin'], box_set_flip['ori_cls'], box_set_flip['ori_reg']))
            box_set_flip['bdb3d'] = get_corners_of_bb3d(basis_flip, coeffs_flip, centroid_flip)
            delta_2d_flip = [- delta_2d[0], delta_2d[1]]
            box_set_flip['delta_2d'] = delta_2d_flip
            # print box_set['delta_2d'], box_set_flip['delta_2d']
            boxes_out_flip.append(box_set_flip)
        if len(boxes_out) == 0:
            continue
        data = dict()
        data['rgb_path'] = op.join(PATH.metadata_root, 'images', '%06d.jpg' % (i+1))
        data['boxes'] = list_of_dict_to_dict_of_list(boxes_out)
        data['camera'] = camera
        data['layout'] = layout
        data['sequence_id'] = i + 1
        # fliped data
        data_flip = dict()
        data_flip['rgb_path'] = op.join(PATH.metadata_root, 'images', '%06d_flip.jpg' % (i+1))
        # img_flip = Image.open(data['rgb_path']).transpose(Image.FLIP_LEFT_RIGHT)
        # img_flip.save(data_flip['rgb_path'])
        data_flip['boxes'] = list_of_dict_to_dict_of_list(boxes_out_flip)
        data_flip['camera'] = camera_flip
        data_flip['layout'] = layout_flip
        data_flip['sequence_id'] = i + 1
        if shift:
            save_path = op.join(PATH.metadata_root, 'sunrgbd_train_test_data', str(i+1) + '_shift_5' + '.pickle')
            save_path_flip = op.join(PATH.metadata_root, 'sunrgbd_train_test_data', str(i+1) + '_flip' + '_shift_5' + '.pickle')
        else:
            save_path = op.join(PATH.metadata_root, 'sunrgbd_train_test_data', str(i + 1) + '.pickle')
            save_path_flip = op.join(PATH.metadata_root, 'sunrgbd_train_test_data', str(i + 1) + '_flip' + '.pickle')
        if (i + 1) <= 5050:
            test_path.append(save_path)
        else:
            train_path.append(save_path)
        with open(save_path, 'w') as f:
            pickle.dump(data, f)
        f.close()
        with open(save_path_flip, 'w') as f:
            pickle.dump(data_flip, f)
        f.close()
    print np.array(layout_centroid).mean(axis=0)
    print np.array(layout_coeffs).mean(axis=0)
    if not shift:
        with open(op.join(PATH.metadata_root, 'train.json'), 'w') as f:
            json.dump(train_path, f)
        f.close()
        with open(op.join(PATH.metadata_root, 'test.json'), 'w') as f:
            json.dump(test_path, f)
        f.close()
Example #40
class LinearSVM:
    @staticmethod
    def do(train_data, train_label, test_data, test_label=None, adjust_parameters=True):
        train_data = np.array(train_data).squeeze()
        train_label = np.array(train_label).squeeze()
        test_data = np.array(test_data).squeeze()
        if test_label is not None:
            test_label = np.array(test_label).squeeze()
        svm = LinearSVC()
        svm.fit(train_data, train_label)
        predicts = svm.predict(test_data)
        acc = None
        if test_label is not None:
            acc = accuracy_score(test_label, predicts)
            print acc
        return predicts

if __name__ == '__main__':
    data = scio.loadmat('/home/give/PycharmProjects/MedicalImage/BoVW/data_256_False.mat')
    train_features = data['train_features']
    val_features = data['val_features']
    train_labels = data['train_labels']
    val_labels = data['val_labels']
    val_labels = np.squeeze(val_labels)
    print np.shape(train_features), np.shape(train_labels)
    predicted_label = LinearSVM.do(train_features, train_labels, val_features, val_labels, adjust_parameters=True)

    np.save('./predicted_res.npy', predicted_label)
    # predicted_label = np.load('./predicted_res.npy')
    calculate_acc_error(predicted_label, val_labels)
Example #41
 def load_file(self, filename: str):
     full_path = os.path.join(self.path, filename)
     data = sio.loadmat(full_path)
     return RabiData(data['power'][0][0], data['taus'][0], data['zs'][0])
import numpy as np
import scipy.io as io
import cv2
import NeuralNetwork
import SVM
import LogisticReg

data = io.loadmat("ExtYaleB10.mat")

train = data['train']
test = data['test']
train = np.ndarray.tolist(train)[0]
test = np.ndarray.tolist(test)[0]
train = np.array(train)
test = np.array(test)
y_train = np.zeros((500, 10))
y_test = np.zeros((140, 10))

k = 0
for i in range(10):
    for j in range(k, k + 50):
        y_train[j][i] = 1
    k += 50

k = 0
for i in range(10):
    for j in range(k, k + 14):
        y_test[j][i] = 1
    k += 14

x_train = []
Example #43
import scipy.io as sio

def gen_line(lista):
	st = ''
	z = len(lista)
	for i in range (0,z):
		if i > 0:
			st += ' '
		st += str(lista[i])
	st += '\n'
	return st

y_fundo = (sio.loadmat('data/y_fundo_30cm.mat'))['y_fundo'][0]
y_topo = (sio.loadmat('data/y_topo_30cm.mat'))['y_topo'][0]

fp_topo = open("dataWin/y_topo.txt","w")
fp_topo.write(gen_line(y_topo))
fp_topo.close()

fp_fundo = open("dataWin/y_fundo.txt","w")
fp_fundo.write(gen_line(y_fundo))
fp_fundo.close()
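
For reference, numpy.savetxt can write the same single-line, space-separated output as gen_line above. A sketch using the same input file; the output filename here is hypothetical:

import numpy as np
import scipy.io as sio

y_topo = sio.loadmat('data/y_topo_30cm.mat')['y_topo'][0]
# One row, space-separated, like gen_line() plus the manual file writing above
np.savetxt('dataWin/y_topo_savetxt.txt', y_topo[np.newaxis, :], fmt='%s', delimiter=' ')
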
Example #44
# exercise 8.2.6

from matplotlib.pyplot import figure, plot, subplot, title, show, bar
import numpy as np
from scipy.io import loadmat
import neurolab as nl
from sklearn import cross_validation
from scipy import stats

# Load Matlab data file and extract variables of interest
mat_data = loadmat('..\\Data\\wine2.mat')
attributeNames = [name[0] for name in mat_data['attributeNames'][0]]
X = mat_data['X']
y = X[:, 10]  # alcohol contents (target)
X = X[:, 1:10]  # the rest of features
N, M = X.shape
C = 2

# Normalize data
X = stats.zscore(X)

# Normalize and compute PCA (UNCOMMENT to experiment with PCA preprocessing)
#Y = stats.zscore(X,0);
#U,S,V = np.linalg.svd(Y,full_matrices=False)
#V = V.T
# Components to be included as features
#k_pca = 3
#X = X @ V[:,0:k_pca]
#N, M = X.shape

# Parameters for neural network classifier
Example #45
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import cross_val_score
import matplotlib.pyplot as plt
from topography import topography

if __name__ == '__main__':

    subject = 1
    tmin = -0.5  # in sec.
    tmax = 1.0  # in sec.
    cv = 5  # numbers of fold of cross-validation
    filename = 'data/train_subject%02d.mat' % subject
    layout_filename = '../additional_files/Vectorview-all.lout'

    print "Loading %s" % filename
    data = loadmat(filename, squeeze_me=True)
    X = data['X']
    y = data['y']
    sfreq = data['sfreq']

    print "Applying the desired time window: [%s, %s] sec." % (tmin, tmax)
    time = np.linspace(-0.5, 1.0, 375)
    time_window = np.logical_and(time >= tmin, time <= tmax)
    X = X[:, :, time_window]
    time = time[time_window]

    print "Loading channels name."
    channel_name = np.loadtxt(layout_filename,
                              skiprows=1,
                              usecols=(5, ),
                              delimiter='\t',
Example #46
def evaluate_SA(source, target, project, gamma, method, n):
    """
    Report the cross-domain sentiment classification accuracy. 
    """
    # Parameters to reduce the number of features in the tail
    domainTh = {'books': 5, 'dvd': 5, 'kitchen': 5, 'electronics': 5}

    # gamma = 1.0
    print "Source Domain", source
    print "Target Domain", target
    if project:
        print "Projection ON", "Gamma = %f" % gamma
    else:
        print "Projection OFF"
    # Load the projection matrix.
    M = sp.csr_matrix(
        sio.loadmat("../work/%s-%s/proj.mat" % (source, target))['proj'])
    (nDS, h) = M.shape

    # Load pivots.
    pivotsFile = "../work/%s-%s/obj/%s" % (source, target, method)
    features = pi.load_stored_obj(pivotsFile)
    pivots = dict(features[:n]).keys()
    print "selecting top-%d features in %s as pivots" % (n, method)

    # Load features and get domain specific features
    fname = "../work/%s-%s/obj/freq" % (source, target)
    if "un_" in method:
        fname = "../work/%s-%s/obj/un_freq" % (source, target)
    features = pi.load_stored_obj(fname)
    feats = selectTh(dict(features), domainTh[source])
    print "experimental features = ", len(feats)
    #print feats

    DSwords = [item for item in feats if item not in pivots]

    feats = feats.keys()
    # write train feature vectors.
    trainFileName = "../work/%s-%s/trainVects.SCL" % (source, target)
    testFileName = "../work/%s-%s/testVects.SCL" % (source, target)
    featFile = open(trainFileName, 'w')
    count = 0
    for (label, fname) in [(1, 'train.positive'), (-1, 'train.negative')]:
        F = open("../data/%s/%s" % (source, fname))
        for line in F:
            count += 1
            #print "Train ", count
            words = set(line.strip().split())
            # write the original features.
            featFile.write("%d " % label)
            x = sp.lil_matrix((1, nDS), dtype=np.float64)
            for w in words:
                #featFile.write("%s:1 " % w)
                if w in feats:
                    x[0, feats.index(w)] = 1
            # write projected features.
            if project:
                y = x.tocsr().dot(M)
                for i in range(0, h):
                    featFile.write("proj_%d:%f " % (i, gamma * y[0, i]))
            featFile.write("\n")
        F.close()
    featFile.close()
    # write test feature vectors.
    featFile = open(testFileName, 'w')
    count = 0
    for (label, fname) in [(1, 'test.positive'), (-1, 'test.negative')]:
        F = open("../data/%s/%s" % (target, fname))
        for line in F:
            count += 1
            #print "Test ", count
            words = set(line.strip().split())
            # write the original features.
            featFile.write("%d " % label)
            x = sp.lil_matrix((1, nDS), dtype=np.float64)
            for w in words:
                #featFile.write("%s:1 " % w)
                if w in feats:
                    x[0, feats.index(w)] = 1
            # write projected features.
            if project:
                y = x.dot(M)
                for i in range(0, h):
                    featFile.write("proj_%d:%f " % (i, gamma * y[0, i]))
            featFile.write("\n")
        F.close()
    featFile.close()
    # Train using classias.
    modelFileName = "../work/%s-%s/model.SCL" % (source, target)
    trainLBFGS(trainFileName, modelFileName)
    # Test using classias.
    [acc, correct, total] = testLBFGS(testFileName, modelFileName)
    intervals = clopper_pearson(correct, total)
    print "Accuracy =", acc
    print "Intervals=", intervals
    print "###########################################\n\n"
    return acc, intervals
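# Note on the feature lookup above: feats is a plain list, so feats.index(w) is an
# O(len(feats)) scan for every word in every review. A precomputed word -> column
# map makes the lookup constant time (self-contained sketch with toy data):
feats_demo = ['good', 'bad', 'excellent']
feat_index = {w: i for i, w in enumerate(feats_demo)}
assert feat_index['bad'] == 1   # same column as feats_demo.index('bad'), without the scan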
Example #47
0
                        help="ROI image width in pixels")
    parser.set_defaults(height=648, width=486)
    return parser.parse_args()


if __name__ == '__main__':
    args = get_args()

    data_Craw = []  # raw calcium trace
    data_A = []  # spatial footprint
    scores = []  # labels from manual review of ROIs from CNMFE
    height = args.height
    width = args.width

    for file_idx in tqdm(range(len(args.input))):
        data = loadmat(args.input[file_idx])
        # spatial downsample factor
        ds = data['ssub']

        data_Craw.append(data['C_raw'][:, :])

        A = data['A'].transpose(1, 0)
        A = A.reshape(
            (data['C_raw'].shape[0], int(height / ds), int(width / ds)))

        data_A.append(A)

        for tt in range(data['C_raw'].shape[0]):
            # ROI labels (keep, 1, or exclude, 0)
            if tt in data['keep']:
                scores.append(1)
Example #48
0
def main(meta_data_dir='./data/meta_data', reingest=True):
    meta_data_dir = pathlib.Path(meta_data_dir)
    if not meta_data_dir.exists():
        raise FileNotFoundError(f'Path not found!! {meta_data_dir.as_posix()}')

    # ==================== DEFINE CONSTANTS =====================

    # ---- inferred from paper ----
    hemi = 'left'
    skull_reference = 'bregma'
    photostim_devices = {473: 'LaserGem473', 594: 'LaserCoboltMambo100',  596: 'LaserCoboltMambo100'}

    # ---- from lookup ----
    probe = 'A4x8-5mm-100-200-177'
    electrode_config_name = 'silicon32'
    project_name = 'li2015'

    # ================== INGESTION OF METADATA ==================

    # ---- delete all Sessions ----
    if reingest:
        (experiment.Session & (experiment.ProjectSession & {'project_name': project_name}).fetch('KEY')).delete()

    # ---- insert metadata ----
    meta_data_files = meta_data_dir.glob('*.mat')
    for meta_data_file in tqdm(meta_data_files):
        print(f'-- Read {meta_data_file} --')
        meta_data = sio.loadmat(meta_data_file, struct_as_record=False, squeeze_me=True)['meta_data']

        # ==================== person ====================
        person_key = dict(username=meta_data.experimenters,
                          fullname=meta_data.experimenters)
        lab.Person.insert1(person_key, skip_duplicates=True)

        # ==================== subject gene modification ====================
        modified_genes = (meta_data.animalGeneModification
                          if isinstance(meta_data.animalGeneModification, (np.ndarray, list))
                          else [meta_data.animalGeneModification])
        lab.ModifiedGene.insert((dict(gene_modification=g, gene_modification_description=g)
                                 for g in modified_genes), skip_duplicates=True)

        # ==================== subject strain ====================
        animal_strains = (meta_data.animalStrain
                          if isinstance(meta_data.animalStrain, (np.ndarray, list))
                          else [meta_data.animalStrain])
        lab.AnimalStrain.insert(zip(animal_strains), skip_duplicates=True)

        # ==================== subject ====================
        animal_id = (meta_data.animalID[0]
                     if isinstance(meta_data.animalID, (np.ndarray, list)) else meta_data.animalID)
        animal_source = (meta_data.animalSource[0]
                         if isinstance(meta_data.animalSource, (np.ndarray, list)) else meta_data.animalSource)
        subject_key = dict(subject_id=int(re.search(r'\d+', animal_id).group()),
                           sex=meta_data.sex[0].upper() if len(meta_data.sex) != 0 else 'U',
                           species=meta_data.species,
                           animal_source=animal_source)
        try:
            date_of_birth = parse_date(meta_data.dateOfBirth)
            subject_key['date_of_birth'] = date_of_birth
        except:
            pass

        lab.AnimalSource.insert1((animal_source,), skip_duplicates=True)

        with lab.Subject.connection.transaction:
            if subject_key not in lab.Subject.proj():
                lab.Subject.insert1(subject_key)
                lab.Subject.GeneModification.insert((dict(subject_key, gene_modification=g) for g in modified_genes),
                                                     ignore_extra_fields=True)
                lab.Subject.Strain.insert((dict(subject_key, animal_strain=strain) for strain in animal_strains),
                                                     ignore_extra_fields=True)

        # ==================== session ====================
        session_key = dict(subject_key, username=person_key['username'],
                           session=len(experiment.Session & subject_key) + 1,
                           session_date=parse_date(meta_data.dateOfExperiment + ' ' + meta_data.timeOfExperiment))
        experiment.Session.insert1(session_key, ignore_extra_fields=True)
        experiment.ProjectSession.insert1({**session_key, 'project_name': project_name}, ignore_extra_fields=True)

        print(f'\tInsert Session - {session_key["subject_id"]} - {session_key["session_date"]}')

        # ==================== Probe Insertion ====================
        brain_location_key = dict(brain_area=meta_data.extracellular.recordingLocation,
                                  hemisphere=hemi)
        insertion_loc_key = dict(skull_reference=skull_reference,
                                 ap_location=meta_data.extracellular.recordingCoordinates[0] * 1000,  # mm to um
                                 ml_location=meta_data.extracellular.recordingCoordinates[1] * 1000,  # mm to um
                                 dv_location=meta_data.extracellular.recordingCoordinates[2] * -1)    # already in um

        with ephys.ProbeInsertion.connection.transaction:
            ephys.ProbeInsertion.insert1(dict(session_key, insertion_number=1, probe=probe,
                                              electrode_config_name=electrode_config_name), ignore_extra_fields=True)
            ephys.ProbeInsertion.InsertionLocation.insert1(dict(session_key, **insertion_loc_key,
                                                                insertion_number=1),
                                                           ignore_extra_fields=True)
            ephys.ProbeInsertion.RecordableBrainRegion.insert1(dict(session_key, **brain_location_key,
                                                                    insertion_number=1),
                                                               ignore_extra_fields=True)
            ephys.ProbeInsertion.ElectrodeSitePosition.insert((dict(
                session_key, insertion_number=1, probe=probe, electrode_config_name=electrode_config_name,
                electrode_group=0, electrode= site_idx + 1,
                electrode_posx=x*1000, electrode_posy=y*1000, electrode_posz=z*1000)
                for site_idx, (x, y, z) in enumerate(meta_data.extracellular.siteLocations)),
                ignore_extra_fields=True)

        print(f'\tInsert ProbeInsertion - Location: {brain_location_key}')

        # ==================== Virus ====================
        if 'virus' in meta_data._fieldnames and isinstance(meta_data.virus, sio.matlab.mio5_params.mat_struct):
            virus_info = dict(
                virus_source=meta_data.virus.virusSource,
                virus=meta_data.virus.virusID,
                virus_lot_number=meta_data.virus.virusLotNumber if len(meta_data.virus.virusLotNumber) != 0 else '',
                virus_titer=meta_data.virus.virusTiter.replace('x10', '') if meta_data.virus.virusTiter != 'untitered' else None)
            virus.Virus.insert1(virus_info, skip_duplicates=True)

            # -- BrainLocation
            brain_location_key = dict(brain_area=meta_data.virus.infectionLocation,
                                      hemisphere=hemi)
            virus_injection = dict(
                {**virus_info, **subject_key, **brain_location_key},
                injection_date=parse_date(meta_data.virus.injectionDate))

            virus.VirusInjection.insert([dict(virus_injection,
                                              injection_id=inj_idx + 1,
                                              ap_location=coord[0] * 1000,
                                              ml_location=coord[1] * 1000,
                                              dv_location=coord[2] * 1000 * -1,
                                              injection_volume=vol)
                                         for inj_idx, (coord, vol) in enumerate(zip(meta_data.virus.infectionCoordinates,
                                                                                    meta_data.virus.injectionVolume))],
                                        ignore_extra_fields=True, skip_duplicates=True)
            print(f'\tInsert Virus Injections - Count: {len(meta_data.virus.injectionVolume)}')

        # ==================== Photostim ====================
        if 'photostim' in meta_data._fieldnames and isinstance(meta_data.photostim, sio.matlab.mio5_params.mat_struct):
            photostimLocation = (meta_data.photostim.photostimLocation
                                 if isinstance(meta_data.photostim.photostimLocation, np.ndarray)
                                 else np.array([meta_data.photostim.photostimLocation]))
            photostimCoordinates = (meta_data.photostim.photostimCoordinates
                                    if isinstance(meta_data.photostim.photostimCoordinates[0], np.ndarray)
                                    else np.array([meta_data.photostim.photostimCoordinates]))
            photostim_locs = []
            for ba in set(photostimLocation):
                coords = photostimCoordinates[photostimLocation == ba]
                photostim_locs.append((ba, coords))

            for stim_idx, (loc, coords) in enumerate(photostim_locs):

                experiment.Photostim.insert1(dict(
                    session_key, photo_stim=stim_idx + 1,
                    photostim_device=photostim_devices[meta_data.photostim.photostimWavelength]),
                    ignore_extra_fields=True)

                experiment.Photostim.PhotostimLocation.insert([
                    dict(session_key,  photo_stim=stim_idx + 1,
                         brain_area=loc, skull_reference=skull_reference,
                         ap_location=coord[0] * 1000,
                         ml_location=coord[1] * 1000,
                         dv_location=coord[2] * 1000 * -1) for coord in coords], ignore_extra_fields=True)

            print(f'\tInsert Photostim - Count: {len(photostim_locs)}')

    experiment.PhotostimBrainRegion.populate(display_progress=True)
Example #49
0
def setup_training(args, device, batch_size=1024, learning_rate=0.001, step_size=100, gamma=0.5, l1_weight=0.):

    ################################################################
    # create results_dd
    ################################################################
    results_dd = {}
    #################################################################
    # read training data
    ################################################################

    d = sio.loadmat(args.data_fp)
    usol = d['output'][:, :args.time_idx+1]
    t_grid = d['t'][:, :args.time_idx+1]
    x_grid = d['x']
    logging.info("USOL SHAPE {}, T_GRID SHAPE: {}, X_GRID SHAPE: {}".format(usol.shape,
                                                                            t_grid.shape,
                                                                            x_grid.shape))

    train_dataset = TimeScalingDataSet(usol, t_grid, x_grid, ones_vector=False)
    logging.info("Dataset: {}".format(train_dataset))
    results_dd['ntrain'] = len(train_dataset)
    results_dd['prediction_time'] = args.time_idx

    train_data_loader = torch.utils.data.DataLoader(train_dataset,
                                                    batch_size=batch_size,
                                                    shuffle=True)

    ################################################################
    # read testing data
    ################################################################
    test_data_loader = None  # stays defined when testing is skipped; train_loop below still receives it
    if not args.no_test:

        d_test = sio.loadmat(args.test_data_fp)
        usol_test = d_test['output'][:,:args.time_idx]
        t_grid_test = d_test['t'][:,:args.time_idx]
        x_grid_test = d_test['x']

        test_dataset = TimeScalingDataSet(usol_test, t_grid_test, x_grid_test, ones_vector=False)
        logging.info("Test Dataset: {}".format(test_dataset))
        results_dd['ntest'] = len(test_dataset)

        test_data_loader = torch.utils.data.DataLoader(test_dataset,
                                                        batch_size=batch_size,
                                                        shuffle=True)

    ##################################################################
    # initialize model and optimizer
    ##################################################################
    model_params = {'width': args.width, 'modes':args.freq_modes}

    model = FNO1dComplex(width=args.width, modes=args.freq_modes).to(device)

    results_dd.update(model_params)

    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                    step_size=step_size,
                                                    gamma=gamma)
    results_dd['learning_rate'] = learning_rate

    ##################################################################
    # Call training loop
    ##################################################################
    logging.info("Starting FNO training")
    model = train_loop(model=model,
                                    optimizer=optimizer,
                                    scheduler=scheduler,
                                    start_epoch=0,
                                    end_epoch=args.epochs,
                                    l1_weight=l1_weight,
                                    device=device,
                                    train_data_loader=train_data_loader,
                                    train_df=args.train_df,
                                    do_testing=(not args.no_test),
                                    test_every_n=100,
                                    test_data_loader=test_data_loader,
                                    test_df=args.test_df,
                                    model_path=args.model_fp,
                                    results_dd=results_dd)
    return model, results_dd
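# Hedged usage sketch (not part of the original script): `args` stands for the
# script's argparse namespace with the fields referenced above (data_fp,
# test_data_fp, time_idx, width, freq_modes, epochs, no_test, train_df, test_df,
# model_fp), so the call is shown commented out rather than executed here.
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# model, results_dd = setup_training(args, device, batch_size=512, learning_rate=1e-3)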
Example #50
0
import matplotlib.pyplot as plt
import scipy.io as sio
import numpy as np

# Import the .mat files
import_mat = sio.loadmat('coordinate_BS.mat')
import_traces = sio.loadmat('traces.mat')

# Extract .mat file coordinates and data
bs_coordinates = import_mat['BSCoordinates']
traces = import_traces['traces_data']

# Extract and get unique vehicleID
vehicle_id = traces[:, 1]
uniq_id_raw = np.unique(vehicle_id)

# Initialize unique ID vector
ID_vec = np.zeros(len(uniq_id_raw), dtype=int)
N_vehicle = len(ID_vec)
# Fill unique ID vector
for x in range(N_vehicle):
    ID_vec[x] = int(uniq_id_raw[x])


# Initialize list of per-vehicle traces
vehicle_trace = []
# Loop over the first 1000 unique vehicle IDs (assumes N_vehicle >= 1000)
for i in range(1000):
    # Extract temporal vector containing all parameters
    tmp = traces[traces[:, 1] == ID_vec[i]]
    # Append only position parameter to the vehicle_trace list
    vehicle_trace.append(tmp[:, 2:4])
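# matplotlib is imported above but unused in the snippet as shown; a sketch of
# inspecting the first extracted trace (assumes columns 2:4 of `traces` hold x/y positions):
first_trace = vehicle_trace[0]
plt.plot(first_trace[:, 0], first_trace[:, 1], '.', markersize=2)
plt.xlabel('x position')
plt.ylabel('y position')
plt.title('Trace of vehicle %d' % ID_vec[0])
plt.show()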
Example #51
0
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 30 14:01:27 2016

@author: jimmijamma
"""

# we import the .mat file of arrhythmia
import scipy.io as scio
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

mat_file = scio.loadmat('arrhythmia.mat')
data = mat_file.get('arrhythmia')

data = data[~np.all(data == 0, axis=1)]  # deleting eventual zero columns
class_id = data[:, -1]
n_classes = int(max(class_id))

(N, F) = np.shape(data)

mx_classes = np.zeros((N, n_classes))
for i in range(N):  # one-hot encode every sample's class label
    mx_classes[i][int(class_id[i]) - 1] = 1

data = data[:, :-1]
(N, F) = np.shape(data)

mean = np.mean(data)
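# The snippet stops after computing the mean; a hedged sketch of the per-feature
# standardization this appears to set up (an assumption, not the original code):
feat_mean = np.mean(data, axis=0)
feat_std = np.std(data, axis=0)
feat_std[feat_std == 0] = 1.0        # guard against constant (zero-variance) columns
data_norm = (data - feat_mean) / feat_std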
Example #52
0
        sio.savemat('non_z_simple_output_%s' %(region_key),{'return_dict':return_dict},format='5')


        return(return_dict)




###############################################
#start ########################################
###############################################

ts_filename = glob.glob('Extracted*_timestamps.mat')[0]
extracted_filename = ts_filename[:-15] + '.mat'

a = sio.loadmat(extracted_filename)
timestamps = sio.loadmat(ts_filename)

print extracted_filename

#create matrix of trial-by-trial info
trial_breakdown = timestamps['trial_breakdown']
condensed = np.zeros((np.shape(trial_breakdown)[0],10))

#0: disp_rp, 1: succ scene 2: failure scene, 3: rnum, 4: pnum, 5:succ/fail, 6: value, 7: motiv, 8: disp_rp bin

condensed[:,0] = trial_breakdown[:,1]
condensed[:,1] = trial_breakdown[:,2]
condensed[:,2] = trial_breakdown[:,3]
condensed[:,3] = trial_breakdown[:,5]
condensed[:,4] = trial_breakdown[:,7]
Example #53
0
def load(path):
    return loadmat(path)
Example #54
0
"""
 FEDFUNDS     : Effective Fed Funds Rate, percent.
 GDP          : Real GDP, 3 decimals, billions of Chained 2000 Dollars.
 INDPRO       : Industrial Production Index
 CPIAUCSL     : Consumer Price Index for All Urban Consumers: All Items
 UNRATE       : Civilian Unemployment Rate
 PAYEMS       : All Employees: Total Nonfarm Payrolls
"""

import pandas as pd

data = pd.read_csv('quarterly.csv', index_col='DATE')


import scipy.io as sio

factors_data = sio.loadmat('factors.mat')

# process the factors: raw

factors_q = pd.DataFrame(factors_data['quarterly'])

# drop the last period (as done with the data)
factors_q = factors_q[:-1]

# drop every col that has missings after the beginning
factors_q = factors_q.loc[:, factors_q[5:].isnull().sum(axis=0) == 0]



df_raw = data.reset_index(drop = True)
df_raw.columns = ['GDP', 'CPI', 'FF', 'IP', 'emp', 'unemp']
Example #55
0
def classify(model, inputs):
    in_img = inputs['photo']
    img_ori = np.array(in_img)
    img_fp = 'samples/test1.jpg'

    face_detector = dlib.get_frontal_face_detector()

    # 3. forward
    tri = sio.loadmat('visualize/tri.mat')['tri']
    transform = transforms.Compose(
        [ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
    #print(transform)
    rects = face_detector(img_ori, 1)

    pts_res = []
    Ps = []  # Camera matrix collection
    poses = []  # pose collection, [todo: validate it]
    vertices_lst = []  # store multiple face vertices
    ind = 0
    suffix = get_suffix(img_fp)
    for rect in rects:
        # - use detected face bbox
        bbox = [rect.left(), rect.top(), rect.right(), rect.bottom()]
        roi_box = parse_roi_box_from_bbox(bbox)

        img = crop_img(img_ori, roi_box)

        # forward: one step
        img = cv2.resize(img,
                         dsize=(STD_SIZE, STD_SIZE),
                         interpolation=cv2.INTER_LINEAR)
        input = transform(img).unsqueeze(0)
        print(input)
        with torch.no_grad():

            if mode == 'gpu':
                input = input.cuda()

            param = model(input)
            param = param.squeeze().cpu().numpy().flatten().astype(np.float32)

        # 68 pts
        pts68 = predict_68pts(param, roi_box)

        # two-step for more accurate bbox to crop face
        if bbox_init == 'two':
            roi_box = parse_roi_box_from_landmark(pts68)
            img_step2 = crop_img(img_ori, roi_box)
            img_step2 = cv2.resize(img_step2,
                                   dsize=(STD_SIZE, STD_SIZE),
                                   interpolation=cv2.INTER_LINEAR)
            input = transform(img_step2).unsqueeze(0)
            with torch.no_grad():
                if mode == 'gpu':
                    input = input.cuda()
                param = model(input)
                param = param.squeeze().cpu().numpy().flatten().astype(
                    np.float32)

            pts68 = predict_68pts(param, roi_box)

        pts_res.append(pts68)
        P, pose = parse_pose(param)
        Ps.append(P)
        poses.append(pose)

        vertices = predict_dense(param, roi_box)
        vertices_lst.append(vertices)
        ind += 1

    pncc_feature = cpncc(img_ori, vertices_lst, tri - 1)
    output = pncc_feature[:, :, ::-1]
    print(type(output))
    pilImg = transforms.ToPILImage()(np.uint8(output))

    return {"image": pilImg}
Example #56
0
import tensorflow as tf
import scipy.io as scio
import numpy as np

# Data preparation
x = np.array(scio.loadmat("D:/cache/Matlab/项目/xSpilit.mat")["xSpilit"])  # (m, 200)
y = np.array(scio.loadmat("D:/cache/Matlab/项目/yTrain.mat")["yTrain"])  # (m, 1)


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


# Define the input placeholders
data_x = tf.placeholder(tf.float32, [None, 200])
data_y = tf.placeholder(tf.int32, [None, 1])
# Reshape the input and one-hot encode the labels
input_x = tf.reshape(data_x, [-1, 200, 1, 1])  # (m,200,1,1)
CLASS = 3
one_hot = tf.one_hot(data_y, CLASS, 1, 0)
input_y = tf.reshape(one_hot, [-1, CLASS])  # (m,3)

# Define the network
'''
layer1:
Example #57
0
aes = AES.new(master_key, AES.MODE_ECB)

for file_step in range(int(num_traces / file_size)):
    # Variables we want to store
    all_key_guesses = []
    model_values = []
    traces = []
    ciphertexts = []

    end_size = file_size * (file_step + 1)
    start = file_size * file_step + step_size

    # Perform the conversion
    for file_index in range(start, end_size + 1, step_size):
        file = "traces{}.mat".format(file_index)
        x = loadmat('{}/{}'.format(path, file))
        ptexts = x['ptexts']

        print("Opened {}".format(file_index))

        for i in range(step_size):
            # Do the encryption of the plaintext
            plain = conv_to_plain(x['ptexts'][i])
            bytes_ct = aes.encrypt(plain)

            # Add the traces
            traces.append(x['traces'][i])

            # Select a byte
            ct_byte = int(bytes_ct[sub_key])
            ciphertexts.append(ct_byte)
Example #58
0
        loss = -np.mean(np.sum(log_class_prob * data['targets'], axis=0))

        print('For the', data_name, 'data, the classification cross-entropy '
            'loss is', loss, ', and the classification error rate (i.e. the '
            'misclassification rate) is', error_rate)

    report_calls_to_sample_bernoulli = True


#### ---- Main program

# Part 1 - Initialization
# PS: This initialization is analogous to a4_init.m

# Load base for pseudorandom generator
randomness_source = sio.loadmat('a4_randomness_source.mat')['randomness_source']

# Load data: arrays of 16x16 images of greyscale hand-written digits
from_data_file = sio.loadmat('data_set.mat')['data']
training_data = {}
training_data['inputs'] = from_data_file[0][0]['training'][0][0]['inputs']
training_data['targets'] = from_data_file[0][0]['training'][0][0]['targets']
validation_data = {}
validation_data['inputs'] = from_data_file[0][0]['validation'][0][0]['inputs']
validation_data['targets'] = from_data_file[0][0]['validation'][0][0]['targets']
test_data = {}
test_data['inputs'] = from_data_file[0][0]['test'][0][0]['inputs']
test_data['targets'] = from_data_file[0][0]['test'][0][0]['targets']

report_calls_to_sample_bernoulli = False
Example #59
0
    appear_times = tf.reshape(appear_times, [-1, 1])

    diff = diff / tf.cast((1 + appear_times), tf.float32)
    diff = alpha * diff

    centers_update_op = tf.scatter_sub(centers, labels, diff)

    return loss, centers, centers_update_op


print('-----Importing Dataset-----')
dataset = input('Please input the name of the dataset (IN, SS or KSC): ')
Dataset = dataset.upper()

if Dataset == 'KSC':
    KSC = sio.loadmat('datasets/KSC.mat')
    gt_KSC = sio.loadmat('datasets/KSC_gt.mat')
    data_hsi = KSC['KSC']
    gt_hsi = gt_KSC['KSC_gt']
    TOTAL_SIZE = 5211
    VALIDATION_SPLIT = 0.962875  # 200: 0.962875  400: 0.9245  600: 0.8862  800: 0.84765

if Dataset == 'IN':
    mat_data = sio.loadmat('datasets/Indian_pines_corrected.mat')
    data_hsi = mat_data['indian_pines_corrected']
    mat_gt = sio.loadmat('datasets/Indian_pines_gt.mat')
    gt_hsi = mat_gt['indian_pines_gt']
    TOTAL_SIZE = 10249
    VALIDATION_SPLIT = 0.9812  # 200:0.9812 400:0.9617 600: 0.9422 800:

if Dataset == 'SS':
Example #60
0
import argparse
import pvcMetrics as pvc
import scipy.io as sio
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

# Parse command line args
parser = argparse.ArgumentParser(description='Compute relative mean squared error in space, averaged across time, of two sets of time series electrical potential signals.')
parser.add_argument('-gt', '--ground_truth', help='filename of the ground truth .mat file', required=True)
parser.add_argument('-sol', '--solution', help='filename of the solution .mat file', required=True)

args = parser.parse_args()

# Load ground truth data
ground_truth_dict = {}
ground_truth_dict.update(sio.loadmat(args.ground_truth))

gt_potentials_array  = ground_truth_dict['X_GT']
gt_number_of_nodes   = (gt_potentials_array.shape)[0]
gt_number_of_samples = (gt_potentials_array.shape)[1]

# Load solution data
solution_dict = {}
solution_dict.update(sio.loadmat(args.solution))

sol_potentials_array  = solution_dict['X']
sol_number_of_nodes   = (sol_potentials_array.shape)[0]
sol_number_of_samples = (sol_potentials_array.shape)[1]

t = np.arange(0, gt_number_of_samples, 1)
X_GT = gt_potentials_array
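
# Hedged sketch of the metric described in the argparse help above (the exact
# normalization used by the pvcMetrics module may differ; this assumes the ground
# truth and solution arrays share the same node/sample layout):
X_SOL = sol_potentials_array
rel_mse_per_sample = np.sum((X_SOL - X_GT) ** 2, axis=0) / np.sum(X_GT ** 2, axis=0)
rel_mse = np.mean(rel_mse_per_sample)
print('Relative MSE in space, averaged across time: %g' % rel_mse)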