Example #1
    def get_K(self, t):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore",
                                    category=PendingDeprecationWarning)

            # Exponential (communicability) kernel: K = exp(t * A).
            K = _KernelR.mat_exp(t * self.A)
            return np.array(np.log(K))
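
For reference, the same computation outside the class context; a minimal sketch, assuming scipy is available and using scipy.linalg.expm in place of the unshown _KernelR.mat_exp helper:

import numpy as np
from scipy.linalg import expm

# Toy symmetric adjacency matrix (3-node path graph).
A = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]])

t = 0.5
K = expm(t * A)    # exp(t * A); entrywise positive for a connected graph
logK = np.log(K)   # element-wise log, as returned by get_K above
print(logK)
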
Example #2
def make_legitimate_covariance_matrix(ndim=3, lo=2, hi=15) -> list:
    import warnings
    from numpy import zeros, int16
    from numpy.random import multivariate_normal, randint
    n = ndim
    for _ in range(100000):
        # Fill the upper triangle (including the diagonal) with random ints.
        mx = zeros(shape=(n, n), dtype=int16)
        for i in range(n):
            mx[i, i:] = randint(lo, hi, size=n - i)
        # The lower triangle is zero, so bitwise OR with the transpose
        # mirrors the upper triangle and yields a symmetric matrix.
        cm = mx | mx.transpose()

        with warnings.catch_warnings():
            # Promote the "covariance is not positive semi-definite"
            # RuntimeWarning to an error so invalid candidates are rejected.
            warnings.filterwarnings('error')
            try:
                multivariate_normal(mean=[0] * n, cov=cm, size=200)
                return cm.tolist()
            except RuntimeWarning:
                continue
    else:
        from warnings import warn
        warn("failed to find a covariance matrix. try again", Warning)
        return None
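
A hedged usage sketch (illustrative only): the returned nested list can be passed straight back to numpy as a covariance matrix:

import numpy as np

cov = make_legitimate_covariance_matrix(ndim=3)
if cov is not None:
    samples = np.random.multivariate_normal(mean=[0, 0, 0], cov=cov, size=10)
    print(samples.shape)  # (10, 3)
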
Example #3
    def get_K(self, t):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore",
                                    category=PendingDeprecationWarning)

            # Katz / walk kernel: K = (I - t * A)^-1.
            K = np.linalg.inv(np.matlib.eye(self.A.shape[0]) - t * self.A)
            return np.array(np.log(K))
Example #4
    def get_K(self, t):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore",
                                    category=PendingDeprecationWarning)

            # Heat kernel: K = exp(-t * L) (n likely truncates the series).
            K = _KernelR.mat_exp(-t * self.Ll, n=50)
            if np.any(K < 0):
                # logging.info(t, "K < 0")
                return None
            return np.array(np.log(K))
Example #5
    def get_K(self, alpha):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore",
                                    category=PendingDeprecationWarning)

            # PageRank-style kernel: K = (D - alpha * A)^-1.
            K = np.linalg.inv(self.D - alpha * self.A)
            if np.any(K < 0):
                # logging.info(alpha, "K < 0")
                return None
            return np.array(np.log(K))
Example #6
    def get_K(self, beta):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore",
                                    category=PendingDeprecationWarning)

            # Regularized Laplacian (forest) kernel: K = (I + beta * L)^-1.
            K = np.linalg.inv(np.matlib.eye(self.A.shape[0]) + beta * self.L)
            if np.any(K < 0):
                # logging.info(beta, "K < 0")
                return None
            return np.array(np.log(K))
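
The variants in Examples #3 to #6 differ only in the matrix being exponentiated or inverted. A minimal standalone sketch under the usual graph definitions (A adjacency, D degree matrix, L = D - A), with np.eye and scipy.linalg.expm standing in for the deprecated np.matlib.eye and the unshown _KernelR.mat_exp:

import numpy as np
from scipy.linalg import expm

# Toy adjacency matrix: a triangle graph.
A = np.array([[0., 1., 1.],
              [1., 0., 1.],
              [1., 1., 0.]])
D = np.diag(A.sum(axis=1))  # degree matrix
L = D - A                   # combinatorial Laplacian
I = np.eye(A.shape[0])

t, alpha, beta = 0.2, 0.3, 0.5
K_katz = np.linalg.inv(I - t * A)       # Example #3
K_heat = expm(-t * L)                   # Example #4 (exact matrix exponential)
K_ppr = np.linalg.inv(D - alpha * A)    # Example #5
K_forest = np.linalg.inv(I + beta * L)  # Example #6
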
Example #7
def ProximalDepthSeeker(
        Dose, datax,
        datay):  # same as the distal formula but approaching the data from the front
    warnings.filterwarnings(
        'ignore')  # suppress RuntimeWarnings (e.g. NaN comparisons) in the console
    index = argmax((asarray(datay)) > Dose)
    y0 = (datay[index - 1])
    y1 = (datay[index])
    x0 = (datax[index - 1])
    x1 = (datax[index])
    slope = (
        (y1 - y0) / (x1 - x0)
    )  # calculate the gradient - note a linear interpolation is performed
    intercept = y0 - (slope * x0)  # calculate the theoretical intercept
    proximal = (Dose - intercept) / slope  # finds x value for given Dose level
    return proximal
Example #8
def make_multidim_blobs(n_blobs=3,
                        n_points=100,
                        n_dim=3,
                        value_range=100,
                        relative_dispersion=10):
    import warnings
    from numpy import zeros, float16, diag, abs, uint8, argsort
    from numpy.random import randint, multivariate_normal

    m = n_points // n_blobs
    working_range = 100

    σ2 = (working_range / (n_blobs + 1)**(1 / n_dim) / relative_dispersion)**2
    σ2 = int(σ2 * 0.5), int(σ2 * 1.5)

    X = zeros(shape=(m * n_blobs, n_dim), dtype=float16)
    y = zeros(shape=X.shape[0], dtype=uint8)

    with warnings.catch_warnings():
        warnings.filterwarnings("error")
        for i in range(n_blobs):
            while True:
                # Random variances on the diagonal, random covariances
                # (-1 or 0) in the lower triangle.
                diagonal = randint(*σ2, size=n_dim)
                mx = diag(diagonal)
                for k in range(n_dim):
                    mx[k, :k] = randint(-1, 1, size=k)
                # The opposite triangle is zero, so bitwise OR with the
                # transpose mirrors the matrix into a symmetric one.
                Σ = mx | mx.T
                μ = randint(0, working_range, size=n_dim)
                try:
                    mx = multivariate_normal(mean=μ, cov=Σ, size=m)
                    break
                except RuntimeWarning:
                    continue
            X[i * m:i * m + m, :] = mx
            y[i * m:i * m + m] = i
    # final touches: shift to non-negative values, scale to value_range,
    # then sort the points by their first coordinate
    X += abs(X.min())
    X *= value_range / X.max()
    nx = argsort(X[:, 0])
    X = X[nx]
    y = y[nx]
    return X, y
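
A hedged usage sketch; only the returned shapes and labels are assumed here:

X, y = make_multidim_blobs(n_blobs=3, n_points=99, n_dim=2)
print(X.shape, y.shape)  # (99, 2) (99,)
print(sorted(set(y)))    # [0, 1, 2], one label per blob
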
Example #9
def DistalDepthSeeker(
        Dose, datax, datay
):  # Define a formula to find x for a given y in the distal fall off
    warnings.filterwarnings(
        'ignore')  # suppress RuntimeWarnings (e.g. NaN comparisons) in the console
    index = argmax(
        (asarray(list(reversed(datay)))) > Dose
    )  # Find the first value that exceeds the "Dose" value approaching from greatest x value
    y0 = (datay[len(datay) - index - 1]
          )  # These 4 lines fetch the points either side of the value
    y1 = (datay[len(datay) - index])
    x0 = (datax[len(datax) - index - 1])
    x1 = (datax[len(datax) - index])
    slope = (
        (y1 - y0) / (x1 - x0)
    )  # calculate the gradient - note a linear interpolation is performed
    intercept = y0 - (slope * x0)  # calculate the theoretical intercept
    distal = (Dose - intercept) / slope  # finds x value for given Dose level
    return distal
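
A usage sketch covering both seekers, assuming argmax and asarray come from numpy (as the bare calls above suggest) and a synthetic single-peak depth-dose curve:

import warnings
from numpy import argmax, asarray, exp, linspace

datax = linspace(0, 10, 201)                # depth (synthetic)
datay = 100 * exp(-((datax - 5) ** 2) / 2)  # Gaussian stand-in for a dose peak

print(ProximalDepthSeeker(80.0, datax, datay))  # ~4.33, front of the peak
print(DistalDepthSeeker(80.0, datax, datay))    # ~5.67, back of the peak
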
Example #10
        return Plane([point1, point2, point3])

    @staticmethod
    def get_plane_given_normal_vector_and_a_point(normal_vector, point):
        return Plane([normal_vector, point])

    @staticmethod
    def __check_if_arg_is_vector__(vector):
        if (vector.__class__.__name__ != Vector.__name__):
            raise TypeError("Argument must be of Type: " +
                            str(Vector.__name__))

    @staticmethod
    def __check_if_arg_is_plane__(plane):
        if (plane.__class__.__name__ != Plane.__name__):
            raise TypeError("Argument must be of Type: " + str(Plane.__name__))


def getRandomVectors(n=5, dimensions=3):
    from random import randrange
    return [[randrange(-100, 100) for _ in range(dimensions)]
            for _ in range(n)]


warnings.filterwarnings('ignore')
Example #11
def process_participants(participant_list=None,
                         task_list=None,
                         experiment=None,
                         cfg=None):
    # None defaults avoid shared mutable default arguments.
    participant_list = participant_list or []
    task_list = task_list or []
    experiment = experiment or {}
    cfg = cfg or {}
    warnings.filterwarnings('ignore')
    directory_matrix = data_name_list(participant_list, task_list, experiment)
    #print(directory_matrix)
    #    check_data_exists(directory_matrix)

    # don't we only allow participants that have complete data?
    # how does the participant list in the pre-processing GUI get populated?
    data_check = check_for_incomplete_data(directory_matrix)
    if data_check is not True:
        return data_check

    # this should all be one data frame? and not a bunch of nested lists...
    participant_matrix = []
    output_matrix = []
    field_matrix = []
    fields_trial = ['task', 'task_type', 'trial', 'rotation_angle_deg']
    # we create the block and target data frames from the original... if requested
    # I really don't understand why this has to be so complicated
    fields_block = [
        'task',
        'task_type',
        'block',
    ]
    fields_target = ['task', 'task_type', 'target']
    dependent_variable = 0  # reassigned per trial below
    ############ CREATE PARTICIPANT MATRIX ###############
    for participant in directory_matrix:
        dv_participant = []
        for task in participant:
            dv_task = []
            for trial in task:
                rows = []
                dv_trial = []
                # trial is now actually the relative path to the file...
                trialdf = read_csv(trial)

                # A previous hand-rolled CSV parse computed the dependent
                # variable inline; getTrialReachAngleAt now handles this.
                dependent_variable = getTrialReachAngleAt(
                    trialdf, cfg['dependent_variable'])
                dv_trial.extend([
                    trialdf['task_name'][0], trialdf['trial_num'][0],
                    trialdf['rotation_angle'][0],
                    trialdf['targetangle_deg'][0],
                    '%.2f' % (float(dependent_variable)),
                    trialdf['trial_type'][0]
                ])
                dv_task.append(dv_trial)
            dv_participant.extend(dv_task)
        participant_matrix.append(deepcopy(dv_participant))

    #print(participant_matrix)
    ############ PARTICIPANT MATRIX CREATED ################

    ############ REMOVE OUTLIERS###################
    ############ RO: BY WINDOW #####################
    if cfg['outliers_win']:
        participant_matrix_tmp_win = []
        for participant in participant_matrix:
            participant_rows_tmp = []
            for row in participant:
                if cfg['dependent_variable'] == 'cursor error':
                    if int(row[2]) >= 0:
                        if (float(row[4]) >
                            ((cfg['window_limit']) + float(row[2]))) | (float(
                                row[4]) < ((-cfg['window_limit']))):
                            row[4] = nan
                    elif int(row[2]) < 0:
                        if (float(row[4]) >
                            ((cfg['window_limit']))) | (float(row[4]) < (
                                (-cfg['window_limit']) + float(row[2]))):
                            row[4] = nan
                elif cfg['dependent_variable'] == 'reach deviation':
                    if int(row[2]) >= 0:
                        if (float(row[4]) >
                            ((cfg['window_limit']))) | (float(row[4]) < (
                                (-cfg['window_limit']) - float(row[2]))):
                            row[4] = nan
                    elif int(row[2]) < 0:
                        if (float(row[4]) >
                            ((cfg['window_limit']) - float(row[2]))) | (float(
                                row[4]) < ((-cfg['window_limit']))):
                            row[4] = nan
                participant_rows_tmp.append(row)
            participant_matrix_tmp_win.append(participant_rows_tmp)
        participant_matrix = participant_matrix_tmp_win

    ############ RO: BY STANDARD DEVIATION ################
    if cfg['outliers']:
        participant_matrix_tmp = []
        for participant in participant_matrix:
            jump_to = 0
            participant_array = array(participant)
            for task in task_list:
                task_index = (array(participant)[:, 0] == task).nonzero()[0]
                task_array = (array(participant)[:, 4][task_index])
                task_mean = nanmean(task_array.astype(float))
                task_std = nanstd(task_array.astype(float), ddof=1)
                outlier_index = (
                    (task_array.astype(float) >
                     (task_mean + cfg['outlier_scale'] * task_std)) |
                    (task_array.astype(float) <
                     (task_mean - cfg['outlier_scale'] * task_std))
                ).nonzero()[0] + jump_to
                participant_array[:, 4][outlier_index] = nan
                jump_to = jump_to + len(task_index)
            participant_matrix_tmp.append(participant_array.tolist())
        participant_matrix = participant_matrix_tmp

    ############ OUTPUT BY TRIALS ##################
    data_matrix = []
    for idx_0, data in enumerate(participant_matrix):
        fields_trial.append(participant_list[idx_0])
        if idx_0 == 0:
            data_matrix = array(data)[:, [0, 5, 1, 2, 4]].tolist()
        else:
            dv_column = []
            for row in data:
                dv_column.append(row[4])
            for idx_1, row in enumerate(data_matrix):
                data_matrix[idx_1].append(dv_column[idx_1])
    ############ Standard Deviation & Average In Participants #################
    tmp_data = []
    fields_trial.extend(["average", "sd"])
    for row in array(data_matrix):
        participant_average = nanmean(row[4:].astype(float))
        participant_std = nanstd(row[4:].astype(float), ddof=1)
        row = row.tolist()
        row.append('%.2f' % (float(participant_average)))
        row.append('%.2f' % (float(participant_std)))
        tmp_data.append(row)
    output_matrix.append(deepcopy(tmp_data))

    ############ OUTPUT BY BLOCKS ##################
    ##### USING PARTICIPANT MATRIX TO PRODUCE BLOCK DATA #########
    data_matrix = []
    participant_matrix_blocked = []
    for participant_data in participant_matrix:
        task_matrix_blocked = []
        for task in task_list:
            task_row = []
            task_index = (array(participant_data)[:, 0] == task).nonzero()[0]
            blocksize = task_to_blocksize(task, experiment)
            block_index = [
                task_index[x:x + blocksize]
                for x in range(0, len(task_index), blocksize)
            ]
            for block in range(0, num_blocks(blocksize, task, experiment)):
                block_row = []
                rotation_angle = array(participant_data)[:, 2][
                    block_index[block]][0]
                task_type = array(participant_data)[:,
                                                    5][block_index[block]][0]
                block_mean = nanmean(
                    array(participant_data)[:, 4][block_index[block]].astype(
                        float))
                block_row.extend(
                    [task, task_type, block + 1,
                     '%.2f' % (float(block_mean))])
                task_row.append(block_row)
            task_matrix_blocked.extend(task_row)
        participant_matrix_blocked.append(deepcopy(task_matrix_blocked))
        ##### CONCATENATE BLOCKED MATRIX ################
    for idx_0, data in enumerate(participant_matrix_blocked):
        fields_block.append(participant_list[idx_0])
        if idx_0 == 0:
            data_matrix = data
        else:
            dv_column = []
            for row in data:
                dv_column.append(row[3])
            for idx_1, row in enumerate(data_matrix):
                data_matrix[idx_1].append(dv_column[idx_1])
    ############ Standard Deviation & Average In Participants #################
    tmp_data = []  # appended to 'output_matrix' below
    fields_block.extend(["average", "sd"
                         ])  # this only adds column names to a list of strings
    for row in array(data_matrix):
        participant_average = nanmean(row[3:].astype(float))
        participant_std = nanstd(row[3:].astype(float), ddof=1)
        row = row.tolist()
        row.append('%.2f' % (float(participant_average)))
        row.append('%.2f' % (float(participant_std)))
        tmp_data.append(row)
    output_matrix.append(deepcopy(tmp_data))

    ############ OUTPUT BY TARGET #################
    participant_matrix_target = []
    data_matrix = []
    for participant_data in participant_matrix:
        task_matrix_target = []
        jump_value = 0
        for task in task_list:
            task_row_target = []
            task_index = (array(participant_data)[:, 0] == task).nonzero()[0]
            target_list = unique(array(participant_data)[task_index][:, 3])
            for target in target_list:
                target_row = []
                target_index = (array(participant_data)[task_index][:, 3] ==
                                target).nonzero()[0]
                target_index = target_index + jump_value
                rotation_angle = array(participant_data)[target_index][:, 2][0]
                task_type = array(participant_data)[target_index][:, 5][0]
                target_mean = nanmean(
                    array(participant_data)[:, 4][target_index].astype(float))
                target_row.extend(
                    [task, task_type, target,
                     '%.2f' % (float(target_mean))])
                task_row_target.append(target_row)
            jump_value = jump_value + len(task_index)
            ##### ORDER TARGETS SMALLEST TO GREATEST ANGLE ######
            target_order = []
            for task in task_row_target:
                target_order.append(int(task[2]))
            st_idx = []
            for target in sorted(target_order):
                st_idx.append(target_order.index(target))
            task_row_target_array = array(task_row_target)
            task_row_target_array[:] = task_row_target_array[st_idx]
            task_row_target = task_row_target_array.tolist()
            ##### ADD TO END OF LIST ######
            task_matrix_target.extend(task_row_target)
        participant_matrix_target.append(task_matrix_target)

        ##### CONCATENATE TARGET MATRIX ################
    for idx_0, data in enumerate(participant_matrix_target):
        fields_target.append(participant_list[idx_0])
        if idx_0 == 0:
            data_matrix = data
        else:
            dv_column = []
            for row in data:
                dv_column.append(row[3])
            for idx_1, row in enumerate(data_matrix):
                data_matrix[idx_1].append(dv_column[idx_1])
    ############ Standard Deviation & Average In Participants #################
    tmp_data = []
    fields_target.extend(["average", "sd"])
    for row in array(data_matrix):
        participant_average = nanmean(row[4:].astype(float))
        participant_std = nanstd(row[4:].astype(float), ddof=1)
        row = row.tolist()
        row.append('%.2f' % (float(participant_average)))
        row.append('%.2f' % (float(participant_std)))
        tmp_data.append(row)
    output_matrix.append(deepcopy(tmp_data))

    ##### OUTPUT MATRIX FORM ###############################################
    #[participants by trial, participants by block, participants by target]#
    ########################################################################
    field_matrix.append(fields_trial)
    field_matrix.append(fields_block)
    field_matrix.append(fields_target)
    return field_matrix, output_matrix, participant_matrix
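
The per-task outlier pass above (replace values beyond mean ± outlier_scale standard deviations with NaN) is easier to see in isolation; a minimal sketch with made-up numbers:

from numpy import array, nan, nanmean, nanstd

values = array([1.0, 1.2, 0.9, 8.0, 1.1])  # one task's dependent variable
outlier_scale = 1.5

task_mean = nanmean(values)
task_std = nanstd(values, ddof=1)
outliers = ((values > task_mean + outlier_scale * task_std) |
            (values < task_mean - outlier_scale * task_std))
values[outliers] = nan
print(values)  # 8.0 exceeds the upper bound and becomes nan
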
Example #12
def KL_2D(f, g):
    # Generalized (unnormalized) KL divergence: sum of f*log(f/g) - f + g;
    # nansum skips the NaNs produced by 0 * log(0) terms.
    warnings.filterwarnings('ignore')
    return nansum(f * (log(f / g) - 1) + g)
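
A hedged usage sketch for KL_2D, assuming nansum and log come from numpy and f, g are non-negative arrays (e.g. two normalized 2D histograms):

import warnings
from numpy import nansum, log, array

f = array([[0.2, 0.3], [0.1, 0.4]])
g = array([[0.25, 0.25], [0.25, 0.25]])
print(KL_2D(f, g))  # ~0.106; the ordinary KL divergence when both sum to 1
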
Example #13
pyhfinfo = {"backend": "numpy", "hasgreeted": False, "backendver": "?",
            "ver": ver, "required": "0.6.1".split(".")}

try:
    pyhf.set_backend(b"pytorch")
    import torch
    pyhfinfo["backend"] = "pytorch"
    pyhfinfo["backendver"] = torch.__version__
except pyhf.exceptions.ImportBackendError as e:
    print("[SModelS:pyhfInterface] WARNING could not set pytorch as the pyhf backend, falling back to the default.")
    print("[SModelS:pyhfInterface] We however recommend that pytorch be installed.")
    import numpy
    pyhfinfo["backendver"] = numpy.version.full_version

    import warnings
    warnings.filterwarnings('ignore', r'invalid value encountered in log')

from scipy import optimize
import numpy as np
from smodels.tools.smodelsLogging import logger

def getLogger():
    """
    Configure the logging facility. Maybe adapted to fit into
    your framework.
    """

    import logging

    logger = logging.getLogger("pyhfInterface")
    # formatter = logging.Formatter('%(module)s - %(levelname)s: %(message)s')