Example #1
def runKnn():

    # generate indices for the folds
    folds = KFold(len(allData), n_folds=5)

    # this loops over training and prediction for each fold
    for train_rows, test_rows in folds:

        # these four lines split the data into training and testing sets
        trainFeats = features.iloc[train_rows]
        trainLabs = labels.iloc[train_rows]
        testFeats = features.iloc[test_rows]
        testLabs = labels.iloc[test_rows]

        # train the learner
        knn = neighbors.KNeighborsRegressor(5, "distance")
        knn.fit(trainFeats, trainLabs)

        # measure accuracy
        predictions = knn.predict(testFeats)
        predictions = ndarray.flatten(predictions)
        targets = ndarray.flatten(testLabs.values)
        diffs = targets - predictions
        sse = 0
        se = 0
        denom = len(diffs)
        for val in diffs:
            if str(val) == "nan":
                denom -= 1
                continue
            sse += (val * val)
            se += abs(val)
        print(se / denom)
        print(sse / denom)
        print ""
Example #2
 def update(self, dict_in):
     if self.data == []:
         self.y = flatten(dict_in['y'])
         self.x = flatten(dict_in['x'])
     # x_n is assumed to be bound elsewhere in the class; note that ** is
     # Python's power operator (the original '^' is bitwise XOR and fails on floats)
     value = 10 * log10(norm(self.y - self.x, 2) ** 2 / norm(x_n - self.x, 2) ** 2)
     self.data.append(value)
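
For reference, a self-contained sketch of the decibel ratio this method appends, assuming y is the estimate under evaluation, x the reference, and x_n a baseline estimate:

import numpy as np
from numpy.linalg import norm

x = np.ones(4)    # reference signal
y = x + 0.1       # estimate under evaluation
x_n = x + 0.5     # baseline estimate
value = 10 * np.log10(norm(y - x, 2) ** 2 / norm(x_n - x, 2) ** 2)
print(value)      # about -13.98 dB: y is much closer to x than x_n is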
     
Example #3
def runKnn():

	# generate indices for the folds
	folds = KFold(len(allData), n_folds=5)

	# this loops over training and prediction for each fold
	for train_rows, test_rows in folds:	

		# these four lines split the data into training and testing sets
		trainFeats = features.iloc[train_rows]		
		trainLabs = labels.iloc[train_rows]
		testFeats = features.iloc[test_rows]
		testLabs = labels.iloc[test_rows]

		# train the learner
		knn = neighbors.KNeighborsRegressor(5, "distance")
		knn.fit(trainFeats, trainLabs)
		
		# measure accuracy
		predictions = knn.predict(testFeats)
		predictions = ndarray.flatten(predictions)
		targets = ndarray.flatten(testLabs.values)
		diffs = targets - predictions
		sse = 0
		se = 0
		denom = len(diffs)
		for val in diffs:
			if str(val) == "nan":
				denom -= 1
				continue
			sse += (val * val)
			se += abs(val)
		print(se / denom)
		print(sse / denom)
		print("")
Example #4
File: data.py Project: kaonn/7
def gpremise(goals, goalsn, p):
    m, _, o1, p1 = goals.shape
    _, _, o2, p2 = goalsn.shape
    X = zeros((m, o1 * p1 + o2 * p2))
    for i in range(m):
        X[i] = concatenate(
            (ndarray.flatten(goals[i][p]), ndarray.flatten(goalsn[i][p])))
    return X
Example #5
File: data.py Project: kaonn/7
def cinput(ctxs, ctxsn, goals, goalsn):
    m, n, o = ctxs.shape
    _, p = ctxsn.shape

    X = zeros((m, 2 * n * o + 2 * p))
    for i in range(m):
        X[i] = concatenate((ndarray.flatten(ctxs[i]), ctxsn[i], \
                            ndarray.flatten(goals[i]), goalsn[i]))
    return X
Example #6
File: data.py Project: kaonn/7
def ginput(data, datan):
    m, n, o = data.shape
    _, p = datan.shape
    X = zeros((m, n * o + p))
    for i in range(m):
        X[i] = concatenate((ndarray.flatten(data[i]), datan[i]))
    return X
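
A quick shape check for ginput (cinput and gpremise above follow the same pattern), assuming the numpy names are imported in the module as the snippets suggest:

from numpy import ones

data = ones((5, 3, 4))   # m=5 samples, each a 3x4 array
datan = ones((5, 2))     # m=5 samples, each a length-2 vector
X = ginput(data, datan)
print(X.shape)           # (5, 14): each row is the 3*4 flattened entries plus the 2 extras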
Example #7
def prepexpt(fn = '../Problem_nonoise_v2_init.mat'):

    # fn = '../PSI/Problem_nonoise_v1.mat'
    # fn = '../../PSI/PRECOMP_nonoise_py.mat'

    D = io.loadmat(fn, struct_as_record=False, squeeze_me=True)

    rho = 1e-0
    lambdaFk_nuc = 1e-10
    lambdaFu_nuc = 1e0
    lambdaFk_TF = 1e-10
    lambdaFu_TF = 1e-10

    obs = D['obs']
    opts = D['opts']
    GT = D['GT']
    Sk = D['Sk']
    Fk = D['Fk']
    Su = D['Su']
    Fu = D['Fu']

    if len(Su.shape)<2:
        Su = Su[:,None]
        Fu = Fu[None,:]

    if len(GT.Su.shape)<2:
        GT.Su = GT.Su[:,None]
        GT.Fu = GT.Fu[None,:]

    masks = D['masks']

    bw = GT.bw
    mask = ndarray.flatten(bw) > 0
    Nvoxk = sum(mask)


    [Nvox, Nproj] = opts.P.shape
    [Nproj, T] = obs.data_in.shape

    #Sk0 = GT.seg[mask, :]
    #Fk0 = GT.activity[:, :T]

    print('Initializing variables...')

    Y = obs.data_in
    P0 = opts.P.T  # transpose

    #Nsk = groundtruth.seg.shape[1]
    #Sk = 1e-3 * sp.rand(Nvoxk, Nsk)  # +Sk0



    Nsk = Sk.shape[1]
    Nsu = Su.shape[1]


    print('#Sk:', Nsk, '#Su:', Nsu)
    print('Done initialization!')

    return (Y,Sk,Fk,Su,Fu,GT, P0, mask, masks)
Example #9
    def __init__(self, data):
        """
        Constructor
        @param data: Array (list or numpy array)
        """
        # Create the CDF of the data
        # sort the data:
        if type(data) is pd.DataFrame:
            self.arr = sort(ndarray.flatten(data.values))

        else:
            # self.arr = sort(ndarray.flatten(data), axis=0)
            self.arr = sort(data, axis=0)

        self.iscomplex = iscomplexobj(self.arr)

        # calculate the proportional values of samples
        n = len(data)
        if n > 1:
            self.prob = arange(n, dtype=float) / (n - 1)
        else:
            self.prob = arange(n, dtype=float)

        # iterator index
        self.idx = 0

        # array length
        self.len = len(self.arr)
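
Hypothetical usage; the class name ECDF is assumed here purely for illustration, and the constructor needs numpy's sort/arange/iscomplexobj plus pandas in scope:

import numpy as np

cdf = ECDF(np.array([3.0, 1.0, 2.0]))
print(cdf.arr)    # [1. 2. 3.]
print(cdf.prob)   # [0.  0.5 1. ]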
Example #10
    def __get_tensor_from_ndarray(ndarray: numpy_types) -> JavaObject:

        ctor = Tensor.__infer_tensor_ctor_from_ndarray(ndarray)
        values = k.to_java_array(ndarray.flatten().tolist())
        shape = k.to_java_long_array(ndarray.shape)

        return ctor(values, shape)
Example #11
def NGLostManN_f(NYrs, NGPctManApp, GrazingAnimal_0, NumAnimals, AvgAnimalWt, AnimalDailyN, NGAppNRate, Prec, DaysMonth,
                 NGPctSoilIncRate):
    lossFactAdj = LossFactAdj_f(Prec, DaysMonth)
    ng_app_man_n = NGAppManN_f(NGPctManApp, GrazingAnimal_0, NumAnimals, AvgAnimalWt, AnimalDailyN)
    result = tile(ng_app_man_n * NGAppNRate * (1 - NGPctSoilIncRate), NYrs) * ndarray.flatten(lossFactAdj)
    result = minimum(result, tile(ng_app_man_n, NYrs))  # TODO: should eliminate the double tile
    result = maximum(result, 0)
    return reshape(result, (NYrs, 12))
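
A minimal, self-contained illustration of the tile/flatten/clip pattern used here (values are made up; LossFactAdj_f and NGAppManN_f are project-specific):

from numpy import tile, minimum, maximum, reshape, ndarray, ones

NYrs = 2
lossFactAdj = ones((NYrs, 12)) * 0.3   # per-month loss factors for each year
monthly_n = ones(12) * 5.0             # applied nitrogen per month
result = tile(monthly_n, NYrs) * ndarray.flatten(lossFactAdj)
result = maximum(minimum(result, tile(monthly_n, NYrs)), 0)
print(reshape(result, (NYrs, 12)))     # (2, 12) array of clipped monthly losses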
Example #12
    def plotN2d(self, sigMul=3, nxPts=20, nyPts=20):
        sigma = 1. / np.sqrt(np.diag(self.invCov))
        xa = np.linspace(self.mu[0] - sigMul * sigma[0],
                         self.mu[0] + sigMul * sigma[0], nxPts)
        ya = np.linspace(self.mu[1] - sigMul * sigma[1],
                         self.mu[1] + sigMul * sigma[1], nyPts)
        (xGrid, yGrid) = np.meshgrid(xa, ya)
        xGridF = ndarray.flatten(xGrid)
        yGridF = ndarray.flatten(yGrid)
        zGridF = np.zeros_like(xGridF)
        for (i, x) in enumerate(xGridF):
            y = yGridF[i]
            zGridF[i] = self.pdf(x, y)

        zGrid = np.reshape(zGridF, xGrid.shape)  # meshgrid arrays have shape (nyPts, nxPts)

        plt.contour(xGrid, yGrid, zGrid, colors=['black'])
Example #13
def vlad_vector(vector_path, vocabulary):
    features = read_feature_vector(vector_path)
    codes = vq(features, vocabulary)[0]
    vlad = zeros(vocabulary.shape)
    for idx in range(codes.size):
        diff = subtract(features[idx], vocabulary[codes[idx]])
        vlad[codes[idx]] = add(diff, vlad[codes[idx]])
    return ndarray.flatten(vlad)
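
The same aggregation made stand-alone, with read_feature_vector replaced by random descriptors so it runs on its own:

from numpy import zeros, subtract, add, ndarray, random
from scipy.cluster.vq import vq

features = random.rand(50, 8)    # 50 local descriptors of dimension 8
vocabulary = random.rand(4, 8)   # 4 visual words
codes = vq(features, vocabulary)[0]
vlad = zeros(vocabulary.shape)
for idx in range(codes.size):
    vlad[codes[idx]] = add(subtract(features[idx], vocabulary[codes[idx]]), vlad[codes[idx]])
print(ndarray.flatten(vlad).shape)   # (32,): one residual sum per visual word, flattened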
Example #14
def GRLostManN_f(NYrs, GRPctManApp, GrazingAnimal_0, NumAnimals, AvgAnimalWt, AnimalDailyN, GRAppNRate, Prec, DaysMonth,
                 GRPctSoilIncRate):
    lossFactAdj = LossFactAdj_f(Prec, DaysMonth)
    gr_app_man_n = GRAppManN_f(GRPctManApp, GrazingAnimal_0, NumAnimals, AvgAnimalWt, AnimalDailyN)
    result = (tile(gr_app_man_n, NYrs) * tile(GRAppNRate, NYrs) * ndarray.flatten(lossFactAdj) * tile(
        (1 - GRPctSoilIncRate), NYrs))
    result = minimum(result, tile(gr_app_man_n, NYrs))
    result = maximum(result, 0)
    return reshape(result, (NYrs, 12))
Example #15
def get_most_frequent_val(data_array):
    # The data array is two-dimensional - it has to be "flattened" to a 1-D array before the mode (dominant value) can be computed
    flatten_array = ndarray.flatten(data_array)
    converted_array = []

    # Values in the "flattened" array may have different types - they have to be cast to a single type
    for i in flatten_array:
        converted_array.append(int(i))
    print("Wartość najczęściej występująca w zbiorze (dominanta): " +
          str(bincount(converted_array).argmax()))
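
Example call, assuming numpy's array constructor is in scope alongside the names the function already uses:

from numpy import array

get_most_frequent_val(array([[1.0, 2.0], [2.0, 3.0]]))
# prints: Most frequent value in the data set (mode): 2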
Example #16
def vlad_vector(vector_path, vocabulary):
    features = read_feature_vector(vector_path)
    codes_idx = vq(features, vocabulary)[
        0]  #A length M array holding the code book index for each observation
    vlad = zeros(vocabulary.shape)
    for i in range(codes_idx.size):
        code_idx = codes_idx[i]
        diff = subtract(features[i], vocabulary[code_idx])
        vlad[code_idx] = add(diff, vlad[code_idx])
    return ndarray.flatten(vlad)
Example #17
    def embedPayload(self, payload, override=False):
        if type(payload) != Payload:
            raise TypeError
        if payload.img.size > self.img.size:
            raise ValueError
        if self.payloadExists() and override == False:
            raise Exception
        xml = ndarray.flatten(
            numpy.unpackbits(numpy.fromstring(payload.xml, dtype='uint8')))
        img = self.img

        if self.getScale() == "Color":
            crimg = []
            for i in img:
                for j in i:
                    crimg.append(j[0])
            for i in img:
                for j in i:
                    crimg.append(j[1])
            for i in img:
                for j in i:
                    crimg.append(j[2])
            crimg = ndarray.flatten(numpy.array(crimg))
            crimg[0:xml.size] &= 254
            crimg[0:xml.size] += xml
            spl = lambda lst, sz: [
                lst[i:i + sz] for i in range(0, len(lst), sz)
            ]
            lst = spl(crimg, int(len(crimg) / 3))
            l = []
            for i in range(int(len(crimg) / 3)):
                l.append([lst[0][i], lst[1][i], lst[2][i]])
            img = numpy.array(l)
            psize = self.img.shape
            img = numpy.resize(img, (int(psize[0]), int(psize[1]), 3))
        else:
            img = ndarray.flatten(img)
            img[0:xml.size] &= 254
            img[0:xml.size] += xml
            psize = self.img.shape
            img = numpy.resize(img, psize)
        return img
Example #18
def GRLostManN_f(NYrs, GRPctManApp, GrazingAnimal_0, NumAnimals, AvgAnimalWt,
                 AnimalDailyN, GRAppNRate, Prec, DaysMonth, GRPctSoilIncRate):
    lossFactAdj = LossFactAdj_f(Prec, DaysMonth)
    gr_app_man_n = GRAppManN_f(GRPctManApp, GrazingAnimal_0, NumAnimals,
                               AvgAnimalWt, AnimalDailyN)
    result = (tile(gr_app_man_n, NYrs) * tile(GRAppNRate, NYrs) *
              ndarray.flatten(lossFactAdj) * tile(
                  (1 - GRPctSoilIncRate), NYrs))
    result = minimum(result, tile(gr_app_man_n, NYrs))
    result = maximum(result, 0)
    return reshape(result, (NYrs, 12))
Example #19
    def __get_java_array_from_ndarray(ndarray: numpy_types) -> JavaObject:
        if ndarray.size == 0:
            raise ValueError("Cannot infer type because array is empty")

        if np.issubdtype(ndarray.dtype, np.bool_):
            return k.jvm_view().Py4jByteArrayConverter.toBooleanArray(
                np.packbits(ndarray.flatten()).tobytes(), ndarray.size)
        elif np.issubdtype(ndarray.dtype, np.int32):
            return k.jvm_view().Py4jByteArrayConverter.toIntegerArray(
                ndarray.flatten().tobytes())
        elif np.issubdtype(ndarray.dtype, np.int64):
            return k.jvm_view().Py4jByteArrayConverter.toLongArray(
                ndarray.flatten().tobytes())
        elif np.issubdtype(ndarray.dtype, np.floating):
            return k.jvm_view().Py4jByteArrayConverter.toDoubleArray(
                ndarray.flatten().tobytes())
        else:
            raise NotImplementedError(
                "Generic types in an ndarray are not supported. Was given {}".
                format(ndarray.dtype))
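
The dtype dispatch above can be checked in isolation with plain numpy (the Py4jByteArrayConverter side is project-specific):

import numpy as np

for arr in (np.array([True, False]),
            np.arange(3, dtype=np.int32),
            np.arange(3, dtype=np.int64),
            np.linspace(0.0, 1.0, 3)):
    if np.issubdtype(arr.dtype, np.bool_):
        print(arr.dtype, '-> boolean branch:', np.packbits(arr.flatten()).tobytes())
    elif np.issubdtype(arr.dtype, np.int32):
        print(arr.dtype, '-> integer branch:', len(arr.flatten().tobytes()), 'bytes')
    elif np.issubdtype(arr.dtype, np.int64):
        print(arr.dtype, '-> long branch:', len(arr.flatten().tobytes()), 'bytes')
    elif np.issubdtype(arr.dtype, np.floating):
        print(arr.dtype, '-> double branch:', len(arr.flatten().tobytes()), 'bytes')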
Example #20
def NGLostManN_f(NYrs, NGPctManApp, GrazingAnimal_0, NumAnimals, AvgAnimalWt,
                 AnimalDailyN, NGAppNRate, Prec, DaysMonth, NGPctSoilIncRate):
    lossFactAdj = LossFactAdj_f(Prec, DaysMonth)
    ng_app_man_n = NGAppManN_f(NGPctManApp, GrazingAnimal_0, NumAnimals,
                               AvgAnimalWt, AnimalDailyN)
    result = tile(ng_app_man_n * NGAppNRate *
                  (1 - NGPctSoilIncRate), NYrs) * ndarray.flatten(lossFactAdj)
    result = minimum(result,
                     tile(ng_app_man_n,
                          NYrs))  # TODO: should eliminate the double tile
    result = maximum(result, 0)
    return reshape(result, (NYrs, 12))
Example #21
 def codePic(self):
     if self.getScale() == "Gray":
         gray = ndarray.flatten(self.img)
         if self.cmplvl != -1:
             gray = compress(ndarray.flatten(self.img), self.cmplvl)
         imgCode = base64.b64encode(gray)
     elif self.getScale() == "Color":
         color = []
         for i in self.img:
             for j in i:
                 color.append(j[0])
         for i in self.img:
             for j in i:
                 color.append(j[1])
         for i in self.img:
             for j in i:
                 color.append(j[2])
         color = numpy.array(color)
         if self.cmplvl != -1:
             color = compress(color, self.cmplvl)
         imgCode = base64.b64encode(color)
     return str(imgCode)[2:-1]
Example #22
def runRegressionModel(model):

    # generate indices for the folds
    folds = KFold(len(allData), n_folds=5)
    curFold = 1
    # this loops over training and prediction for each fold
    for train_rows, test_rows in folds:

        # these four lines split the data into training and testing sets
        trainFeats = features.iloc[train_rows].as_matrix()
        trainLabs = labels.iloc[train_rows].as_matrix()
        testFeats = features.iloc[test_rows].as_matrix()
        testLabs = labels.iloc[test_rows].as_matrix()

        # train the learner
        model = model.fit(trainFeats, trainLabs)

        # measure accuracy
        predictions = model.predict(testFeats)
        predictions = ndarray.flatten(predictions)
        targets = ndarray.flatten(testLabs)
        diffs = targets - predictions
        sse = 0
        se = 0
        denom = len(diffs)
        for val in diffs:
            if str(val) == "nan":
                denom -= 1
                continue
            sse += (val * val)
            se += abs(val)
        print "== Fold " + str(curFold) + " =="
        print "-- ME: " + str(se / denom)
        print "-- MSE: " + str(sse / denom)
        print ""
        curFold += 1
Example #23
def runRegressionModel(model):

	# generate indices for the folds
	folds = KFold(len(allData), n_folds=5)
	curFold = 1
	# this loops over training and prediction for each fold
	for train_rows, test_rows in folds:

		# these four lines split the data into training and testing sets
		trainFeats = features.iloc[train_rows].as_matrix()		
		trainLabs = labels.iloc[train_rows].as_matrix()
		testFeats = features.iloc[test_rows].as_matrix()
		testLabs = labels.iloc[test_rows].as_matrix()

		# train the learner
		model = model.fit(trainFeats, trainLabs)
		
		# measure accuracy
		predictions = model.predict(testFeats)
		predictions = ndarray.flatten(predictions)
		targets = ndarray.flatten(testLabs)
		diffs = targets - predictions
		sse = 0
		se = 0
		denom = len(diffs)
		for val in diffs:
			if str(val) == "nan":
				denom -= 1
				continue
			sse += (val * val)
			se += abs(val)
		print "== Fold " + str(curFold) + " =="
		print "-- ME: " + str( se / denom )
		print "-- MSE: " + str( sse / denom )
		print ""
		curFold += 1
Example #24
def sum_path(mat):
    """ Finds the path with the minimal sum in a given matrix, where movement is only right and below"""
    global sols    

    if size(mat) == 0:
        return 0
    if size(mat) == 1:
        return mat[0,0]
    if min(mat.shape[0],mat.shape[1]) == 1:
        return sum(ndarray.flatten(mat))
    if mat.shape in sols:
        return sols[mat.shape]

    ans = mat[-1,-1] + min(sum_path(mat[:,:-1]), sum_path(mat[:-1,:]))
    sols[mat.shape] = ans
    return ans
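
Usage sketch, assuming numpy's size and ndarray are imported in the function's module. The shape-keyed memo in sols works because every recursive call operates on a top-left prefix mat[:r, :c], so within a single call a shape identifies a unique subproblem (clear the cache between different matrices):

from numpy import array

sols = {}  # the global cache the function expects
print(sum_path(array([[1, 2],
                      [3, 4]])))  # 7, via the path 1 -> 2 -> 4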
Example #25
    def compile_types(Sbus, types=None, logger=list()):
        """
        Compile the types
        @return:
        """

        pq = where(types == BusMode.PQ.value[0])[0]
        pv = where(types == BusMode.PV.value[0])[0]
        ref = where(types == BusMode.REF.value[0])[0]
        sto = where(types == BusMode.STO_DISPATCH.value)[0]

        if len(ref) == 0:  # there is no slack!

            if len(pv) == 0:  # there are no pv neither -> blackout grid

                warn('There are no slack nodes selected')
                logger.append('There are no slack nodes selected')

            else:  # select the first PV generator as the slack

                mx = max(Sbus[pv])
                if mx > 0:
                    # find the generator that is injecting the most
                    i = where(Sbus == mx)[0][0]

                else:
                    # all the generators are injecting zero, pick the first pv
                    i = pv[0]

                # delete the selected pv bus from the pv list and put it in the slack list
                pv = delete(pv, where(pv == i)[0])
                ref = [i]
                # print('Setting bus', i, 'as slack')

            ref = ndarray.flatten(array(ref))
            types[ref] = BusMode.REF.value[0]
        else:
            pass  # no problem :)

        pqpv = r_[pq, pv]
        pqpv.sort()

        return ref, pq, pv, pqpv
Example #26
def xbee_1():
    device = XBeeDevice('COM4', 230400)

    remote_addr = '0013A2004155E2A6'
    remote_device = RemoteXBeeDevice(device, XBee64BitAddress.from_hex_string(remote_addr))

    device.open()
    msg = "Test from COM4"
    count = 0

    try:
        rcv_bytes = device.read_data(1)
        rcv = pickle.loads(rcv_bytes.data)
        result = []
        print('First rcv: {0:}'.format(rcv))
        print('rcv.data_type: {0:}'.format(rcv.data_type))
        if rcv.data_type == 'audio':
            print('Got "Audio"')
            result = []
            result.append(rcv)
            for i in range(0, rcv.total_sequence - 1):
                rcv_bytes = device.read_data(1)
                rcv = pickle.loads(rcv_bytes.data)
                result.append(rcv)
        print("COM4 received:" + repr(rcv))
     
        result_arr = build_data(result)
        print(result_arr)
        flat_arr = ndarray.flatten(result_arr)
        print(flat_arr)

        newrecording = AudioSegment(flat_arr, sample_width=2, frame_rate=8000, channels=1)
        newrecording.export('file.flac', format = "flac")

        print('Sending from 4')
        bytes_obj = pickle.dumps('ACK')
        device.send_data(remote_device, bytes_obj)
    except TimeoutException:
        print("Timeout on COM4")
Example #27
 def extractPayload(self):
     if self.payloadExists() is False:
         raise Exception
     if self.getScale() == "Gray":
         new_img = self.img & 1
         num = numpy.packbits(ndarray.flatten(new_img))
     else:
         color = []
         for i in self.img:
             for j in i:
                 color.append(j[0])
         for i in self.img:
             for j in i:
                 color.append(j[1])
         for i in self.img:
             for j in i:
                 color.append(j[2])
         color = numpy.array(color) & 1
         num = numpy.packbits(numpy.array(color))
     xml = ''
     for i in num:
         xml += chr(i)
     return Payload(xml=xml)
Example #28
 def payloadExists(self):
     if self.getScale() == "Gray":
         new_img = self.img & 1
         num = numpy.packbits(ndarray.flatten(new_img))
     else:
         color = []
         for i in self.img:
             for j in i:
                 color.append(j[0])
         for i in self.img:
             for j in i:
                 color.append(j[1])
         for i in self.img:
             for j in i:
                 color.append(j[2])
         color = numpy.array(color) & 1
         num = numpy.packbits(numpy.array(color))
     xml = ''
     for i in num:
         xml += chr(i)
     if re.search(r'<?xml version', xml):
         return True
     else:
         return False
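
A stand-alone illustration of the LSB-plane check: img & 1 keeps only each pixel's least significant bit, and packbits reassembles those bits into bytes, which should spell out an XML header when a payload is present:

import numpy

img = numpy.array([[0, 1], [2, 3]], dtype=numpy.uint8)
lsbs = numpy.ndarray.flatten(img & 1)   # array([0, 1, 0, 1], dtype=uint8)
print(numpy.packbits(lsbs))             # [80]: the bits 01010000 packed MSB-first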
Example #29
def lossfun_aug():
    l_aug = 0

    # primary loss: the KL divergence between Y and X
    l_Poiss = Y[:, tidx2] * (np.log(Y[:, tidx2]) - np.log(X[:, tidx2]))
    l_Poiss = sum(l_Poiss[np.isfinite(l_Poiss)]) + sum(sum(X[:, tidx2] - Y[:, tidx2]))
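    # NOTE: the "+ np.square(U_...) - np.square(U_...)" terms in the penalty sums
    # below cancel to zero as written; a standard ADMM augmented-Lagrangian penalty
    # would instead be (rho / 2) * sum((A - B + U) ** 2).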
    l_X = (rho / 2) * sum(sum((Xhat[:, tidx2] - X[:, tidx2] + np.square(U_X[:, tidx2]) - np.square(U_X[:, tidx2]))))

    l_Fk = 0
    [u, s, v] = np.linalg.svd(Fk_nuc)
    l_Fk = l_Fk + lambdaFk_nuc * sum(s)
    l_Fk = l_Fk + (rho / 2) * sum(
        (ndarray.flatten(Fk_nuc) - ndarray.flatten(Fk) + np.square(ndarray.flatten(U_Fk_nuc)) - np.square(ndarray.flatten(U_Fk_nuc))))
    l_Fk = l_Fk + (rho / 2) * sum(
        (ndarray.flatten(Fk_nn) - ndarray.flatten(Fk) + np.square(ndarray.flatten(U_Fk_nn)) - np.square(ndarray.flatten(U_Fk_nn))))
    # l_Fk = l_Fk + lambdaFk_TF*sum(Fk_TF*)
    # l_Fk = l_Fk + lambdaFk_TF*sum(Fk_TF*)

    l_Fu = 0
    [u, s, v] = np.linalg.svd(Fu_nuc)
    l_Fu = l_Fu + lambdaFu_nuc * sum(s)
    l_Fu = l_Fu + (rho / 2) * sum(
        (ndarray.flatten(Fu_nuc) - ndarray.flatten(Fu) + np.square(ndarray.flatten(U_Fu_nuc)) - np.square(ndarray.flatten(U_Fu_nuc))))
    l_Fu = l_Fu + (rho / 2) * sum(
        (ndarray.flatten(Fu_nn) - ndarray.flatten(Fu) + np.square(ndarray.flatten(U_Fu_nn)) - np.square(ndarray.flatten(U_Fu_nn))))

    l_Sk = 0
    l_Sk = l_Sk + (rho / 2) * sum(
        (ndarray.flatten(Sk_nn) - ndarray.flatten(Sk) + np.square(ndarray.flatten(U_Sk_nn)) - np.square(ndarray.flatten(U_Sk_nn))))
    l_Su = 0
    l_Su = l_Su + (rho / 2) * sum(
        (ndarray.flatten(Su_nn) - ndarray.flatten(Su) + np.square(ndarray.flatten(U_Su_nn)) - np.square(ndarray.flatten(U_Su_nn))))

    l_aug = l_Poiss + l_X + l_Fk + l_Fu + l_Sk + l_Su

    print('l_Poiss: ', l_Poiss)
    print('l_X: ', l_X)
    print('l_Fk: ', l_Fk)
    print('l_Fu: ', l_Fu)
    print('l_Sk: ', l_Sk)
    print('l_Su: ', l_Su)

    l_aug = l_aug / Y[:, tidx2].size
    return l_aug
Example #30
 async def pull_CB(self, instance, async_lib):
     'Periodically update the value'
     from time import time, sleep
     await instance.write(value=ndarray.flatten(daq.buffer.get_all()))
Example #31
    def transfer_learning(self, max_iterations, accepted_mean_square_error=0.1, batch_size=5000, learning_rate=1e-4):

        # Adam optimizer optimizes the parameters (weights and biases) at the learning rate specified
        output_network_optimizer = Adam(self.output_neural_net.parameters(), lr=learning_rate)

        # This is the row count difference between that of input and output grids
        row_difference = self.output_grid_dimension[0] - self.input_grid_dimension[0]

        # This is the column count difference between that of input and output grids
        column_difference = self.output_grid_dimension[1] - self.input_grid_dimension[1]

        # We cycle through iterations of each batch of training the output neural network until the max iteration
        for _ in range(0, max_iterations):

            # Training list - each element in the list contains [input state for output neural net, target value]
            training_list = []

            # Shuffling through batches and then calculating the Mean square error for the entire batch
            for batch in range(0, batch_size):

                # Creating a matrix to hold the observation state of the input neural network map (0 or 1)
                output_network_known_state = np_array([[randint(0, 1) for _ in range(0, self.output_grid_dimension[1])]
                                                      for _ in range(0, self.output_grid_dimension[0])])

                # This is used to store the robot and target state of the input neural network
                input_network_state = np_empty((self.network_robot_count+1)*2, dtype=np_uint8)

                # Creating positions for all of the robots and target of the input neural network randomly
                for i in range(0, (self.network_robot_count+1)*2, 2):
                    input_network_state[i] = randint(0, self.input_grid_dimension[0]-1)
                    input_network_state[i+1] = randint(0, self.input_grid_dimension[1]-1)

                # Creates a backup copy of the input state
                input_network_state_memory = input_network_state

                # Sliding the input network state window over different sections of the output network state
                for i in range(0, row_difference):
                    for j in range(0, column_difference):

                        # Creating a matrix to hold the observation state of the input neural network map (0 or 1)
                        input_network_known_state = output_network_known_state[i:(i+self.input_grid_dimension[0]),
                                                                               j:(j+self.input_grid_dimension[1])]

                        # This is used to store the robot and target state of the output neural network
                        output_network_state = np_empty((self.network_robot_count+1)*2, dtype=np_uint8)

                        # Extending the input position states across the moving window within the output grid dimensions
                        for k in range(0, (self.network_robot_count+1)*2, 2):
                            output_network_state[k] = input_network_state[k]+i
                            output_network_state[k+1] = input_network_state[k+1]+j

                        # Now we flatten data in the input network state, a 2-D matrix to 1-D and append
                        input_network_state = np_append(input_network_state,
                                                        np_ndarray.flatten(input_network_known_state))

                        # Now we flatten data in the output network state, a 2-D matrix to 1-D and append
                        output_network_state = np_append(output_network_state,
                                                         np_ndarray.flatten(output_network_known_state))

                        # Looping through the 4 possible actions each robot can take and then appending them to state
                        for k in range(0, 4):
                            # Adding an action completes the input state for the input neural network
                            input_network_state_tensor = Tensor(np_append(input_network_state, k))
                            # Adding an action completes the input state for the output neural network
                            output_network_state_tensor = Tensor(np_append(output_network_state, k))
                            # Getting the Q value predicted by the input neural network for the given state
                            input_network_predicted_value = self.input_neural_net.forward(input_network_state_tensor)
                            # Now we know the value the output neural network is to be trained towards for its given
                            # input. Add both of them to the training list so that batch training can occur later
                            training_list.append([output_network_state_tensor, input_network_predicted_value])

                        # Restoring the input state from memory
                        input_network_state = input_network_state_memory

            # Shuffling the training data before feeding it in for training
            shuffle(training_list)
            # Initializing the current MSE loss
            sum_square_error = 0.0
            # Using the batch of state and target data for training the output neural network
            for batch in range(0, batch_size):
                # Obtaining the completed input states for the output neural network
                output_network_state_tensor = training_list[batch][0]
                # Obtaining the target predictions that the output neural network should be trained towards
                predicted_target_value = training_list[batch][1]
                # Getting the Q value predicted by the output neural network for the given input state
                output_network_predicted_value = self.output_neural_net.forward(output_network_state_tensor)
                # Adding the current square error to the sum of square errors
                sum_square_error += pow((output_network_predicted_value - predicted_target_value), 2)
                # Represents the function that can calculate training error
                training_error_function = MSELoss()
                # Our goal is to reduce the mean square error loss between the target prediction and that of network
                training_error = training_error_function(output_network_predicted_value, predicted_target_value)
                # Clears the gradients of all optimized torch tensors
                output_network_optimizer.zero_grad()
                # During the backwards pass, gradients from each replica are summed into the original module
                training_error.backward()
                # Training actually happens here. Performs a single optimization step of weights and biases
                output_network_optimizer.step()

            # Dividing the sum of square errors by the batch size to get the mean square error
            current_mean_square_error = sum_square_error/batch_size

            print(current_mean_square_error)

            # Checks if the MSE for the entire batch is within acceptable levels and then returns the output neural net
            if current_mean_square_error <= accepted_mean_square_error:
                # we return a list where true indicates that we achieved the accepted mean square error criteria
                return [self.output_neural_net, True]

        # Failed to completely train the output neural network. Return a list with second element false to indicate this
        return [self.output_neural_net, False]
Example #32
def linear_symbolic(A=None, b=None, G=None, h=None):
    """Convert linear equality and inequality constraints from matrices to a 
symbolic string of the form required by mystic's constraint parser.

Inputs:
    A -- (ndarray) matrix of coefficients of linear equality constraints
    b -- (ndarray) vector of solutions of linear equality constraints
    G -- (ndarray) matrix of coefficients of linear inequality constraints
    h -- (ndarray) vector of solutions of linear inequality constraints

    NOTE: Must provide A and b; G and h; or A, b, G, and h;
          where Ax = b and Gx <= h. 

    For example:
    >>> A = [[3., 4., 5.],
    ...      [1., 6., -9.]]
    >>> b = [0., 0.]
    >>> G = [1., 0., 0.]
    >>> h = [5.]
    >>> print linear_symbolic(A,b,G,h)
    1.0*x0 + 0.0*x1 + 0.0*x2 <= 5.0
    3.0*x0 + 4.0*x1 + 5.0*x2 = 0.0
    1.0*x0 + 6.0*x1 + -9.0*x2 = 0.0
"""
    eqstring = ""
    # Equality constraints
    if A is not None and b is not None:
        # If one-dimensional and not in a nested list, add a list layer
        try:
            ndim = len(A[0])
        except:
            ndim = len(A)
            A = [A]

        # Flatten b, in case it's in the form [[0, 1, 2]] for example.
        if len(b) == 1:
            b = list(ndarray.flatten(asarray(b)))

        # Check dimensions and give errors if incorrect.
        if len(A) != len(b):
            raise Exception("Dimensions of A and b are not consistent.")

        # 'matrix multiply' and form the string
        for i in range(len(b)):
            Asum = ""
            for j in range(ndim):
                Asum += str(A[i][j]) + '*x' + str(j) + ' + '
            eqstring += Asum.rstrip(' + ') + ' = ' + str(b[i]) + '\n'

    # Inequality constraints
    ineqstring = ""
    if G is not None and h is not None:
        # If one-dimensional and not in a nested list, add a list layer
        try:
            ndim = len(G[0])
        except:
            ndim = len(G)
            G = [G]

        # Flatten h, in case it's in the form [[0, 1, 2]] for example.
        if len(h) == 1:
            h = list(ndarray.flatten(asarray(h)))

        # Check dimensions and give errors if incorrect.
        if len(G) != len(h):
            raise Exception("Dimensions of G and h are not consistent.")

        # 'matrix multiply' and form the string
        for i in range(len(h)):
            Gsum = ""
            for j in range(ndim):
                Gsum += str(G[i][j]) + '*x' + str(j) + ' + '
            ineqstring += Gsum.rstrip(' + ') + ' <= ' + str(h[i]) + '\n'
    totalconstraints = ineqstring + eqstring
    return totalconstraints 
Example #33
 async def pull_IMAGE(self, instance, async_lib):
     'Periodically update the value'
     await instance.write(
         value=ndarray.flatten(camera.buffer.get_last_value()))
Example #34
def qppv(B, msk, nd, wl, nrp, cth, n_itr_th, mx_itr, pfs):
    """This code is adapted from the paper
       "Quasi-periodic patterns(QP):Large-scale dynamics in resting state fMRI that correlate"\
       with local infraslow electrical activity" Shella Keilholz,D et al.NeuroImage,Volume 84, 1 January 2014."\
       The paper implemnts the algorithms for finding QPP in resting state fMRI using matlab"\
       This project is an attempt to adopt the algorithm in python, and to integrate into C-PAC.
       Input:
       ------
       B: 2D nifti image 
       msk: mask of the 2D nifti image
       nd: number of subjects*number of runs per subject
       wl: window length
       nrp: number of repetitions 
       cth: threshold
       n_itr_th: number of iterations
       mx_itr: maximum number of repetitions 
       pfs: path to save the template, FTP, ITP and iter files
       
       
       Returns:
       -------
       time_course_file: 2D array of time points where QPP is detected in .npy format
       ftp_file: 1D array of Final Time Points in .npy format
       itp_file: 1D array of Final Time points in .npy format
       iter_file: 1D array of iterations in .npy format 
       
       Notes:
       -----
       i) If using a .mat file as an input, save only the image with flag 'v7.0' to make it scipy.io loadmat compatible
       (This functionality will soon be replaced by importing with NifTi format only)
       
       ii) To show the peaks found in the signal, add a show=True boolean value to the "find peaks" command.
       A "True" value plots the peaks that are found in the signal.
       
       Examples:
       --------
       >> python detectqppv.py '/path/to/Data/file.mat'
       'path/to/mask/file/' 30 6 0.2 0.3 1 15 'path/to/save/results/' 6 1
    """
    #get parameters of the image shape to decide the
    #shape of the cells,arrays,etc
    nT = B.shape[1]  #smaller value
    nX = B.shape[0]  #larger value
    #print(nT,nX)
    nt = int(nT / nd)  #to prevent floating point errors during initializations

    nch = nt - wl + 1
    nTf = (nX * wl)
    #make it a boolean mask - all entries greater than zero become 1, the rest stay 0
    #no real use of mask anywhere else?
    msk = np.zeros((nX, 1))
    msk[(np.sum(abs(B)) > 0)] = 1
    a = np.where(msk[:, 0] == 1)
    B = B[a[0], :]

    #defining 2D arrays here. Each row will finally be a flattened nX*wl template vector
    bchf = np.zeros((nT, nX * wl))
    bchfn = np.zeros((nT, nX * wl))
    #for each subject*run, store the template in the bchf array; a normalized copy (zero mean,
    #unit norm via the dot product) goes into bchfn, which sharpens the peaks found later (explained below)
    for i in range(nd):
        for ich in range(nch):
            template = B[:, (i) * nt + ich:(i) * nt + wl + ich]
            #change template from a row vector to a column vector
            template = ndarray.flatten(template)
            # insert the template into the bchfn array (this template will be a 1D array)
            bchf[i * nt + ich] = template
            #normalize
            template = template - np.sum(template) / nTf
            #get dot product
            #template_trans = np.transpose(template)
            temp_dot = np.dot(template, template)
            template_sqrt = np.sqrt(temp_dot)
            template = template / template_sqrt
            #add said template into bchfn
            bchfn[(i) * nt + ich] = template
            #removing nan values and making them 0 to prevent further issues in calculations
            A = np.isnan(bchfn)
            bchfn[A] = 0
        #todo: have to make args.nd and other args.something as just the variable name

    #array initialized to later be deleted from the random ITP array
    i2x = np.zeros((nd, wl - 1))
    #filling the sequence with range of numbers from wl+2 to nt
    for i in range(1, nd + 1):
        i2x[i - 1, :] = range(i * nt - wl + 2, i * nt + 1)

    #delete instances of ITP from i2x
    itp = np.arange(1, nT + 1)
    i2x = ndarray.flatten(i2x)

    itp = np.delete(itp, i2x - 1, 0)
    #permute the numbers within ITP
    #itp = np.random.permutation(itp)

    itp = np.random.permutation(itp)
    itp = itp[0:nrp]

    #Initialize the time course that will later on be saved
    time_course = np.zeros((nrp, nT))
    ftp = [[None]] * nrp
    iter = np.zeros(nrp)
    for irp in range(nrp):
        #initialize a matrix c which will hold the templates
        c = np.zeros(nT)
        for i in range(nd):
            for ich in range(nch):
                #bchfn_transpose = np.transpose(bchfn[itp[irp]])
                bchfn_1 = bchfn[itp[irp]]
                bchfn_2 = bchfn[i * nt + ich]
                c[(i) * nt + ich] = np.dot(bchfn_1, bchfn_2)
                #print(c.shape)
        #using Marcos Duarte's detect_peaks.py function, which is a replica of the matlab findpeaks function
        #switching off show true until it is necessary, in order to test code.
        peaks = detect_peaks(c, mph=cth[0], mpd=wl)
        #show=True)

        #indexes = pu.indexes(c, thresh=c[0])
        #You're deleting the first and last instances of the peaks that are now in the 'peaks' array
        for i in range(nd):
            if i * nt in peaks:
                peaks = np.delete(peaks, np.where(peaks == (i) * nt))
            if i * nt + nch in peaks:
                peaks = np.delete(peaks, np.where(peaks == i * nt + nch))
        #house three copies of templates (inefficient) which is then used to decide between the correlation coefficient in the next loop
        c_0 = c
        c_00 = c
        c_000 = c
        itr = 1
        #peaks_size = peaks.size
        #print(peaks)
        #print(peaks.shape)
        #print(peaks.size)
        while itr <= mx_itr:
            c = gaussian_filter(c, 0.5)

            if itr <= n_itr_th:
                ith = 0
            else:
                ith = 1
            th = cth[ith]
            tpsgth = peaks
            n_tpsgth = np.size(tpsgth)

            if n_tpsgth <= 1:
                break

            template = bchf[tpsgth[0]]
            for i in range(1, n_tpsgth):
                template = template + bchf[tpsgth[i]]
            template = template / n_tpsgth

            #perform a repeat of the operations in order to find peaks in the template
            #template_trans2=np.transpose(template)
            template = template - np.sum(template) / nTf
            template = template / np.sqrt(np.dot(template, template))
            for i in range(nd):
                for ich in range(nch):
                    c[i * nt + ich] = np.dot(template, bchfn[(i) * nt + ich])
            peaks = detect_peaks(c, mph=cth[1], mpd=wl)
            for i in range(nd):
                if i * nt in peaks:
                    peaks = np.delete(peaks, np.where(peaks == (i) * nt))
                if i * nt + nch in peaks:
                    peaks = np.delete(peaks, np.where(peaks == i * nt + nch))
            c_0_norm = (c_0 - np.mean(c_0)) / (np.std(c_0))
            #use the correlation coefficient. It returns a matrix and therefore, the first entry of that matrix will be the correlation coefficient value
            if (np.corrcoef(c_0, c)[0, 1] > 0.9999) or (np.corrcoef(
                    c_00, c)[0, 1] > 0.9999) or (np.corrcoef(c_000, c)[0, 1] >
                                                 0.9999):
                break

            c_000 = c_00
            c_00 = c_0
            c_0 = c
            itr = itr + 1

        if n_tpsgth > 1:
            time_course[irp, :] = c
            ftp[irp] = tpsgth.tolist()
            iter[irp] = itr
    #save everything!!

    plt.plot(template, 'b')
    plt.title('Template of QPP(nd=6,wl=30,subjects=7)')
    plt.xlabel('avg of func.data of length WL(30)')
    plt.show()
    mdict = {}
    mdict["C"] = time_course
    mdict["FTP"] = ftp
    mdict["ITER"] = iter
    mdict["ITP"] = itp

    np.save('time_course_file', time_course)
    np.save('ftp_file', ftp)
    np.save('iter_file', iter)
    np.save('itp_file', itp)

    return time_course, ftp, itp, iter
Example #35
def lossfun_aug():
    l_aug = 0

    # primary loss: the KL divergence between Y and X
    l_Poiss = Y[:, tidx2] * (np.log(Y[:, tidx2]) - np.log(X[:, tidx2]))
    l_Poiss = sum(l_Poiss[np.isfinite(l_Poiss)]) + sum(
        sum(X[:, tidx2] - Y[:, tidx2]))
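    # NOTE: the "+ np.square(U_...) - np.square(U_...)" terms in the penalty sums
    # below cancel to zero as written; a standard ADMM augmented-Lagrangian penalty
    # would instead be (rho / 2) * sum((A - B + U) ** 2).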
    l_X = (rho / 2) * sum(
        sum((Xhat[:, tidx2] - X[:, tidx2] + np.square(U_X[:, tidx2]) -
             np.square(U_X[:, tidx2]))))

    l_Fk = 0
    [u, s, v] = np.linalg.svd(Fk_nuc)
    l_Fk = l_Fk + lambdaFk_nuc * sum(s)
    l_Fk = l_Fk + (rho / 2) * sum(
        (ndarray.flatten(Fk_nuc) - ndarray.flatten(Fk) + np.square(
            ndarray.flatten(U_Fk_nuc)) - np.square(ndarray.flatten(U_Fk_nuc))))
    l_Fk = l_Fk + (rho / 2) * sum(
        (ndarray.flatten(Fk_nn) - ndarray.flatten(Fk) + np.square(
            ndarray.flatten(U_Fk_nn)) - np.square(ndarray.flatten(U_Fk_nn))))
    # l_Fk = l_Fk + lambdaFk_TF*sum(Fk_TF*)
    # l_Fk = l_Fk + lambdaFk_TF*sum(Fk_TF*)

    l_Fu = 0
    [u, s, v] = np.linalg.svd(Fu_nuc)
    l_Fu = l_Fu + lambdaFu_nuc * sum(s)
    l_Fu = l_Fu + (rho / 2) * sum(
        (ndarray.flatten(Fu_nuc) - ndarray.flatten(Fu) + np.square(
            ndarray.flatten(U_Fu_nuc)) - np.square(ndarray.flatten(U_Fu_nuc))))
    l_Fu = l_Fu + (rho / 2) * sum(
        (ndarray.flatten(Fu_nn) - ndarray.flatten(Fu) + np.square(
            ndarray.flatten(U_Fu_nn)) - np.square(ndarray.flatten(U_Fu_nn))))

    l_Sk = 0
    l_Sk = l_Sk + (rho / 2) * sum(
        (ndarray.flatten(Sk_nn) - ndarray.flatten(Sk) + np.square(
            ndarray.flatten(U_Sk_nn)) - np.square(ndarray.flatten(U_Sk_nn))))
    l_Su = 0
    l_Su = l_Su + (rho / 2) * sum(
        (ndarray.flatten(Su_nn) - ndarray.flatten(Su) + np.square(
            ndarray.flatten(U_Su_nn)) - np.square(ndarray.flatten(U_Su_nn))))

    l_aug = l_Poiss + l_X + l_Fk + l_Fu + l_Sk + l_Su

    print('l_Poiss: ', l_Poiss)
    print('l_X: ', l_X)
    print('l_Fk: ', l_Fk)
    print('l_Fu: ', l_Fu)
    print('l_Sk: ', l_Sk)
    print('l_Su: ', l_Su)

    l_aug = l_aug / Y[:, tidx2].size
    return l_aug
Example #36
def X_Theta_to_V(X,Theta):
    V = concatenate( ( X.reshape( -1, 1), Theta.reshape( -1, 1) ) )
    return ndarray.flatten(asarray( V.T ) )
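
Shape check (concatenate, asarray and ndarray are assumed imported from numpy, as in the snippet):

import numpy as np

X = np.arange(6).reshape(2, 3)
Theta = np.arange(4).reshape(2, 2)
print(X_Theta_to_V(X, Theta).shape)   # (10,): the 6 X entries followed by the 4 Theta entries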
Example #37
    def score_hand(hand, flip, verbose):
        """Calculates the points in a given hand.
        In cribbage, you can get, pairs, fifteens, runs, flushes, and nobs.
        Parameters
            hand: 4 card hand (tuple or list)
            flip: flipped card (str)
            verbose: True/False
        Returns: total points  (int)
        """
        if type(hand) == tuple:
            hand = list(hand)
        hand = hand + [flip]
        nums = [int(c.split('-')[0]) for c in hand]
        suits = [c.split('-')[1] for c in hand]

        # nobs
        jack = 0
        if 11 in nums:
            flip_suit = flip.split('-')[1]
            for card in hand:
                if card.split('-') == ['11', flip_suit]:
                    jack = 1

        # pairs
        pairs = {i: nums.count(i) for i in nums}
        pair_score = sum(
            [Cribbage.permu(n, 2) for n in pairs.values() if n > 1])

        # flush
        if len(unique(suits[:4])) == 1:
            if flip.split('-')[1] == suits[0]:
                flush_score = 5
            else:
                flush_score = 4
        else:
            flush_score = 0

        #fifteens and runs
        fifteens = list()
        runs_raw = list()

        for comb in [combinations(hand, i) for i in list(range(6, 1, -1))]:
            for c in (list(comb)):
                #fifteen
                c_adj = [
                    10 if int(n.split('-')[0]) > 10 else int(n.split('-')[0])
                    for n in c
                ]  # deals with face cards
                if c not in fifteens and sum(c_adj) == 15:
                    fifteens.append(c)

                # runs
                nums_a = [int(c_.split('-')[0]) for c_ in c]
                l = len(c_adj)
                c_sorted = sorted(c)
                if l >= 3 and len(unique(nums_a)) == l and (
                        max(nums_a) - min(nums_a)) == (l - 1):
                    runs_raw.append(tuple(c_sorted))

        runs = [list(x) for x in Cribbage.get_unique_runs(runs_raw)
                ]  # helps in counting points

        fifteen_score = len(fifteens) * 2
        runs_score = len(ndarray.flatten(asarray(runs)))

        if verbose:
            pair_explain = [
                "{} {}s".format(v, k) for k, v in pairs.items() if v > 1
            ]
            s = """Jack: {}\npairs({}): {}\nfifteens({}): {}\nruns({}): {}\nflush: {}"""
            print(
                s.format(jack, pair_score, pair_explain, fifteen_score,
                         fifteens, runs_score, runs, flush_score))

        return int(jack + pair_score + flush_score + fifteen_score +
                   runs_score)
Example #38
def X_Theta_to_V(X, Theta):
    V = concatenate((X.reshape(-1, 1), Theta.reshape(-1, 1)))
    return ndarray.flatten(asarray(V.T))
Example #39
    for i in range(1, len(extractor[:, 1, 1])):
        meanIntensityList.append(np.mean(extractor[i, :, :]))
    plt.plot(range(1, len(extractor[:, 1, 1])),
             (np.array(meanIntensityList) > meanIntensity))
    #plt.hlines(np.mean(meanIntensityList)+stdIntensity/10,0,len(extractor[:,1,1]))
plt.show()
#ps(anisotropicExtract[:,:,:])

#%%
ws = sitk.MorphologicalWatershed(anisotropicExtract[30:180, :, :],
                                 level=5,
                                 markWatershedLine=False,
                                 fullyConnected=False)
#ws1=(ws==0)
#ps(ws)
c = (np.bincount(ndarray.flatten(s2a(ws))))
#c.argmax()
wsArr = s2a(ws)
wsList = np.nonzero(wsArr == c.argmax())
randomList = []
for i in range(500):
    randomList.append(np.random.randint(0, len(wsList[0])))
globList = []
for el in randomList:
    seedLoc = [wsList[0][el], wsList[1][el], wsList[2][el]]
    globList.append(glob(anisotropicExtract, seedLoc, 30))
    c = c + 1
globMeans = []
for el in globList:
    globMeans.append(el[1])
seedLocation = globList[globMeans.index(np.min(globMeans))][0]
Example #40
def linear_symbolic(A=None, b=None, G=None, h=None):
    """Convert linear equality and inequality constraints from matrices to a 
symbolic string of the form required by mystic's constraint parser.

Inputs:
    A -- (ndarray) matrix of coefficients of linear equality constraints
    b -- (ndarray) vector of solutions of linear equality constraints
    G -- (ndarray) matrix of coefficients of linear inequality constraints
    h -- (ndarray) vector of solutions of linear inequality constraints

    NOTE: Must provide A and b; G and h; or A, b, G, and h;
          where Ax = b and Gx <= h. 

    For example:
    >>> A = [[3., 4., 5.],
    ...      [1., 6., -9.]]
    >>> b = [0., 0.]
    >>> G = [1., 0., 0.]
    >>> h = [5.]
    >>> print linear_symbolic(A,b,G,h)
    1.0*x0 + 0.0*x1 + 0.0*x2 <= 5.0
    3.0*x0 + 4.0*x1 + 5.0*x2 = 0.0
    1.0*x0 + 6.0*x1 + -9.0*x2 = 0.0
"""
    eqstring = ""
    # Equality constraints
    if A is not None and b is not None:
        # If one-dimensional and not in a nested list, add a list layer
        try:
            ndim = len(A[0])
        except:
            ndim = len(A)
            A = [A]

        # Flatten b, in case it's in the form [[0, 1, 2]] for example.
        if len(b) == 1:
            b = ndarray.flatten(asarray(b)).tolist()

        # Check dimensions and give errors if incorrect.
        if len(A) != len(b):
            raise Exception("Dimensions of A and b are not consistent.")

        # 'matrix multiply' and form the string
        for i in range(len(b)):
            Asum = ""
            for j in range(ndim):
                Asum += str(A[i][j]) + '*x' + str(j) + ' + '
            eqstring += Asum.rstrip(' + ') + ' = ' + str(b[i]) + '\n'

    # Inequality constraints
    ineqstring = ""
    if G is not None and h is not None:
        # If one-dimensional and not in a nested list, add a list layer
        try:
            ndim = len(G[0])
        except:
            ndim = len(G)
            G = [G]

        # Flatten h, in case it's in the form [[0, 1, 2]] for example.
        if len(h) == 1:
            h = ndarray.flatten(asarray(h)).tolist()

        # Check dimensions and give errors if incorrect.
        if len(G) != len(h):
            raise Exception("Dimensions of G and h are not consistent.")

        # 'matrix multiply' and form the string
        for i in range(len(h)):
            Gsum = ""
            for j in range(ndim):
                Gsum += str(G[i][j]) + '*x' + str(j) + ' + '
            ineqstring += Gsum.rstrip(' + ') + ' <= ' + str(h[i]) + '\n'
    totalconstraints = ineqstring + eqstring
    return totalconstraints