Example no. 1
def ArtistsRate(JCR, period, istest=False):    
    global constants
    cols = ["id", "reviews_avgnote", "reviews_all", "playlisters_logged", "starers_logged", "listeners_logged", "downloaders_logged"]
    if period == 'total': cols += ['days_since_publication']
    COLUMNS = JCR.getColumns(cols)

    
    # REVIEWS RATE
    reviews_reduceunder = constants[period]['reviews_reduceunder']
    COLUMNS['reviews_rate'] = computeBayesAvg(COLUMNS["reviews_avgnote"], COLUMNS['reviews_all'], constants[period]['reviews_bayes_const'], reviews_reduceunder)

    # LIKES RATE
    likes = COLUMNS["playlisters_logged"] + 1.5*COLUMNS["starers_logged"]  # *1.5 brings the two value sets to roughly the same average level
    ratios = np.true_divide(likes, COLUMNS["listeners_logged"] + 1)
    likes_reduceunder = constants[period]['likes_reduceunder']
    COLUMNS['likes_rate'] = computeBayesAvg(ratios, COLUMNS["listeners_logged"]+1, constants[period]['likes_bayes_const'], likes_reduceunder)

    # FINAL RATE: combine the components by rank
    if period == 'total':
        days = 1 + nomramlizeTo0_1(np.log2(COLUMNS['days_since_publication'] + 2))
        downloaders = np.true_divide(COLUMNS['downloaders_logged'], days)
    else:
        downloaders = COLUMNS['downloaders_logged']

    COLUMNS['rate'] = getRanks(COLUMNS['likes_rate']) + 1.5*getRanks(COLUMNS['reviews_rate']) + getRanks(downloaders)
    
    
    if istest: 
        return COLUMNS
    else: 
        return {'id': COLUMNS['id'], 'rate': COLUMNS['rate']}
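computeBayesAvg, getRanks, and nomramlizeTo0_1 are helpers defined elsewhere in that project. As a rough sketch of what computeBayesAvg plausibly does, assuming the textbook Bayesian average with a pseudo-count prior plus a penalty for items with fewer than reduce_under ratings (the exact formula in the source may differ):

import numpy as np

def computeBayesAvg(values, counts, bayes_const, reduce_under):
    # Hypothetical reconstruction: pull each item's average toward the
    # global prior, weighted by how many ratings the item actually has.
    prior = np.true_divide(np.sum(values * counts), np.sum(counts) + 1e-12)
    avg = np.true_divide(bayes_const * prior + values * counts,
                         bayes_const + counts)
    # Down-weight items with fewer than `reduce_under` ratings.
    return np.where(counts < reduce_under,
                    avg * np.true_divide(counts, reduce_under), avg)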
Example no. 2
def thresholdColor(img, colattr):
    (domchan, dommin, first, second) = colattr
    channels = cv2.split(img)  # OpenCV channel order: blue, green, red
    height, width, _ = img.shape
    mult = np.full((height, width), 255, dtype=np.uint8)
    red = channels[2].astype(np.uint8)
    green = channels[1].astype(np.uint8)
    blue = channels[0].astype(np.uint8)

    if domchan == "r":
        zerotype = (red > dommin)
        firsttype = np.true_divide(red,green)#r/g
        secondtype = np.true_divide(red,blue)#r/b
    elif domchan == "g":
        zerotype = (green > dommin)
        firsttype = np.true_divide(green,red)#g/r
        secondtype = np.true_divide(green,blue)#g/b

    zerotype = zerotype.astype(np.uint8)
    firsttype = (firsttype > first).astype(np.uint8)
    secondtype = (secondtype > second).astype(np.uint8)
    combined = cv2.bitwise_and(cv2.bitwise_and(zerotype, secondtype), firsttype)
    combined = cv2.multiply(combined, mult)  # scale the 0/1 mask to 0/255

    return combined
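A usage sketch for the function above; the colattr tuple packs the dominant channel, its minimum intensity, and the two ratio thresholds (the file name and threshold values here are made up):

import cv2
import numpy as np

img = cv2.imread("frame.png")         # hypothetical BGR input image
red_attr = ("r", 100, 1.4, 1.4)       # dominant channel, min intensity, r/g and r/b thresholds
mask = thresholdColor(img, red_attr)  # 255 where the pixel is "red enough", else 0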
Example no. 3
def find_entropy_threshold(img_gray, ranges):
    hist_item = cv2.calcHist([img_gray],[0],None,[256],[0,256])
    # compute a mask of non zero items to avoid
    # computing a log(0) later on
    mask = hist_item != 0
    entropies = []
    for k in ranges:
        # [:k] covers [0, k), while [k:] covers [k, 255]
        range1 = hist_item[:k]
        range2 = hist_item[k:]

        sum_r1 = np.sum(range1)
        sum_r2 = np.sum(range2)
        # protect against div by zero / log(0) => nan
        if sum_r1 == 0 or sum_r2 == 0:
            # insert a 0 to keep the same len as "ranges"
            entropies.append(0)
            continue

        mask1 = mask[:k]
        mask2 = mask[k:]

        sum_ln_r1 = np.sum(np.multiply(range1[mask1], np.log(range1[mask1])))
        sum_ln_r2 = np.sum(np.multiply(range2[mask2], np.log(range2[mask2])))

        entropy = (np.log(sum_r1) + np.log(sum_r2)
                   - np.true_divide(sum_ln_r1, sum_r1)
                   - np.true_divide(sum_ln_r2, sum_r2))
        entropies.append(entropy)
    entropies = np.array(entropies)
    # get the maximum "k"
    k = np.argmax(entropies)
    # return a value in the original range
    return ranges[k]
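This is Kapur's maximum-entropy thresholding computed on the grayscale histogram: it picks the split k that maximizes the combined entropy of the two partitions. A usage sketch (the file name is made up):

import cv2

img_gray = cv2.imread("page.png", cv2.IMREAD_GRAYSCALE)
t = find_entropy_threshold(img_gray, range(1, 255))
_, binary = cv2.threshold(img_gray, t, 255, cv2.THRESH_BINARY)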
Example no. 4
 def theta(self, S, t, vol, r):
     # Black-Scholes theta; same arithmetic as the original nested
     # np.multiply/np.true_divide calls, rewritten as infix operations.
     if t > self.T:
         return np.zeros(len(S))
     tau = self.T - t
     time_decay = np.true_divide(-vol * S * norm.pdf(self.d1(S, t, vol, r)),
                                 2 * np.sqrt(tau))
     carry = r * self.K * np.exp(-r * tau)
     if self.op_type == 'c':
         return time_decay - carry * norm.cdf(self.d2(S, t, vol, r))
     else:
         return time_decay + carry * norm.cdf(-self.d2(S, t, vol, r))
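The class's d1 and d2 methods are not shown; assuming the textbook Black-Scholes definitions, the call ('c') branch above is equivalent to this standalone sketch:

import numpy as np
from scipy.stats import norm

def bs_call_theta(S, K, T, t, vol, r):
    tau = T - t
    d1 = (np.log(S / K) + (r + 0.5 * vol ** 2) * tau) / (vol * np.sqrt(tau))
    d2 = d1 - vol * np.sqrt(tau)
    return (-vol * S * norm.pdf(d1) / (2 * np.sqrt(tau))
            - r * K * np.exp(-r * tau) * norm.cdf(d2))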
Example no. 5
def kappa_score(y_true, y_pred):
  """Calculate Cohen's kappa for classification tasks.

  See https://en.wikipedia.org/wiki/Cohen%27s_kappa

  Note that this implementation of Cohen's kappa expects binary labels.

  Args:
    y_true: Numpy array containing true values.
    y_pred: Numpy array containing predicted values.

  Returns:
    kappa: Numpy array containing kappa for each classification task.

  Raises:
    AssertionError: If y_true and y_pred are not the same size, or if class
      labels are not in [0, 1].
  """
  assert len(y_true) == len(y_pred), 'Number of examples does not match.'
  yt = np.asarray(y_true, dtype=int)
  yp = np.asarray(y_pred, dtype=int)
  assert np.array_equal(np.unique(yt), [0, 1]), (
      'Class labels must be binary: %s' % np.unique(yt))
  observed_agreement = np.true_divide(np.count_nonzero(np.equal(yt, yp)),
                                      len(yt))
  expected_agreement = np.true_divide(
      np.count_nonzero(yt == 1) * np.count_nonzero(yp == 1) +
      np.count_nonzero(yt == 0) * np.count_nonzero(yp == 0),
      len(yt) ** 2)
  kappa = np.true_divide(observed_agreement - expected_agreement,
                         1.0 - expected_agreement)
  return kappa
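A quick usage check on a toy example:

import numpy as np

y_true = np.array([1, 0, 1, 1, 0, 1])
y_pred = np.array([1, 0, 0, 1, 0, 1])
print(kappa_score(y_true, y_pred))  # observed 5/6, expected 0.5 -> kappa = 0.667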
Example no. 6
def compute_IICR_n_islands(n, M, t, s=True):
    # This method evaluates the lambda function on a vector
    # of time values t.
    # If 's' is True, the two individuals were sampled from the same
    # island. If 's' is False, they were sampled from different islands.

    # Computing constants
    gamma = np.true_divide(M, n - 1)
    delta = (1 + n * gamma) ** 2 - 4 * gamma
    alpha = 0.5 * (1 + n * gamma + np.sqrt(delta))
    beta = 0.5 * (1 + n * gamma - np.sqrt(delta))

    # Now we evaluate
    x_vector = t
    if s:
        numerator = (1 - beta) * np.exp(-alpha * x_vector) + (alpha - 1) * np.exp(-beta * x_vector)
        denominator = (alpha - gamma) * np.exp(-alpha * x_vector) + (gamma - beta) * np.exp(-beta * x_vector)
    else:
        numerator = beta * np.exp(-alpha * (x_vector)) - alpha * np.exp(-beta * (x_vector))
        denominator = gamma * (np.exp(-alpha * (x_vector)) - np.exp(-beta * (x_vector)))

    lambda_t = np.true_divide(numerator, denominator)

    return lambda_t
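A usage sketch; note that for s=False the expression is 0/0 at t=0, so the time grid below starts slightly above zero:

import numpy as np

t = np.arange(0.1, 50.0, 0.1)
iicr_same = compute_IICR_n_islands(n=10, M=1.0, t=t, s=True)
iicr_diff = compute_IICR_n_islands(n=10, M=1.0, t=t, s=False)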
Example no. 7
    def work(self, input_items, output_items):
        in0 = input_items[0]
        out = output_items[0]
        if len(in0) != self.frame_length:
            print "pilot receive input buffer size != frame length"
        if self.scounter == 0:  # start of a frame
            Y0 = in0[:self.pilot_length]
            Y = np.dot(self.prewhiten, Y0.transpose())
            Y = Y.transpose()
            corr = np.true_divide(np.dot(Y.transpose(), self.pilot_seq.conj()), self.pilot_length)  # nt x nt channel estimate
            A = np.dot(corr, corr.transpose().conj())
            B = np.true_divide(np.dot(Y.transpose(), Y.conj()), self.pilot_length)
            Omega = B - A
            Sigma = np.dot(np.linalg.pinv(Omega) - np.linalg.pinv(B), self.weight)  # nt x nt
            out[0] = Sigma.reshape(self.nt * self.nt)
        self.scounter = self.scounter + len(in0)
        if self.scounter == self.frame_length:
            self.fcounter += 1
            self.scounter = 0

        return 1
Example no. 8
def _hist_bin_doane(x):
    """
    Doane's histogram bin estimator.

    Improved version of Sturges' formula which works better for
    non-normal data. See
    stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    if x.size > 2:
        sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
        sigma = np.std(x)
        if sigma > 0.0:
            # These three operations add up to
            # g1 = np.mean(((x - np.mean(x)) / sigma)**3)
            # but use only one temp array instead of three
            temp = x - np.mean(x)
            np.true_divide(temp, sigma, temp)
            np.power(temp, 3, temp)
            g1 = np.mean(temp)
            return x.ptp() / (1.0 + np.log2(x.size) +
                                    np.log2(1.0 + np.absolute(g1) / sg1))
    return 0.0
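A usage sketch turning the estimated width into a bin count for a skewed sample:

import numpy as np

x = np.random.default_rng(0).lognormal(size=1000)  # skewed, non-normal sample
h = _hist_bin_doane(x)                             # estimated bin width
nbins = int(np.ceil(np.ptp(x) / h))                # corresponding bin count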
Example no. 9
def VImetric(ConfusionMatrix):
    '''Computes Meila's VI metric between two
        clusterings of the same data, e.g. KK clustering 
        and the detcrit_groundtruth from the confusion matrix'''
    
 
    #nbklust = ConfusionMatrix.shape[0]
    #nbklustprime = ConfusionMatrix.shape[1]
    
    totalspikes = np.sum(ConfusionMatrix)
    
    #Pk = np.zeros((nbklust,1))
    #Pkprime = np.zeros((nbklustprime, 1))
    
    Pk = np.true_divide(np.sum(ConfusionMatrix, axis = 1),totalspikes)
    #for k in np.arange(nbklust):
    #    Pk[k] = np.sum(ConfusionMatrix[k,:])/totalspikes
    
    Pkprime = np.true_divide(np.sum(ConfusionMatrix, axis = 0),totalspikes)
    #for kk in np.arange(nbklustprime):
    #    Pkprime[kk] = np.sum(ConfusionMatrix[:,kk])/totalspikes
    
    PJoint = np.true_divide(ConfusionMatrix,totalspikes)
    
    HC = EntropyH(Pk)
    HCprime = EntropyH(Pkprime)


    Inff = MutualInf(Pk, Pkprime, PJoint)
    VI = HC + HCprime - 2*Inff[0]

    VImetrics = {'VI': VI, 'Mutual Inf': Inff, 'PJoint': PJoint, 'PK': Pk, 'PKprime': Pkprime, 'HC': HC, 'HCprime': HCprime}

    return VImetrics    
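EntropyH and MutualInf are not shown. Plausible sketches consistent with how they are used above (VImetric reads the scalar information from Inff[0], so MutualInf is assumed to return a tuple):

import numpy as np

def EntropyH(P):
    # Shannon entropy of a probability vector, skipping zero entries
    P = P[P > 0]
    return -np.sum(P * np.log(P))

def MutualInf(Pk, Pkprime, PJoint):
    # Mutual information I(C;C') between the two clusterings
    outer = np.outer(Pk, Pkprime)
    mask = PJoint > 0
    terms = PJoint[mask] * np.log(np.true_divide(PJoint[mask], outer[mask]))
    return (np.sum(terms), terms)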
Example no. 10
 def __call__(self, values, clip=True, out=None):
     values = _prepare(values, clip=clip, out=out)
     np.multiply(values, np.log(self.exp + 1.), out=values)
     np.exp(values, out=values)
     np.subtract(values, 1., out=values)
     np.true_divide(values, self.exp, out=values)
     return values
Example no. 11
def corrected_mean_ndvi(rgb, irview, roi):
    """Experimental function"""
    # Obtain the exposure values from the images
    rgb_exp, _ = read_header(rgb)
    ir_exp, _ = read_header(irview)

    # Obtain bands from rgb image
    red, green, blue = get_image_bands(rgb, roi=roi)


    # Obtain one of the bands from the IR image
    ir, _, _ = get_image_bands(irview, roi=roi)

    red = np.ma.average(red)
    green = np.ma.average(green)
    blue = np.ma.average(blue)
    ir = np.ma.average(ir)

    # Calculate the 'visible component' of the image pair
    visible = 0.3 * red + 0.59 * green + 0.11 * blue

    if rgb_exp != ir_exp:
        # Correct for exposure
        ir = np.true_divide(ir, math.sqrt(float(ir_exp)))
        red = np.true_divide(red, math.sqrt(float(rgb_exp)))
        visible = np.true_divide(visible, math.sqrt(float(rgb_exp)))

    # Calculate the NIR component:
    nir = ir - visible

    # Now calculate NDVI
    ndvi = (nir - red) / (nir + red)

    return ndvi
Example no. 12
def this_create_total_dict():
    input_dir = '/tmp/fvs_fun/fv_deep_funneled/'
    fv_dict = dict()
    count = 0
    for name in os.listdir(input_dir):
        count = count + 1
        input_file = os.path.join(input_dir, name)
        with open(input_file) as f:
            s = numpy.genfromtxt(f, delimiter=',')
            

            word1 = "".join(re.findall("[a-zA-Z]+", name))
            word = word1[:-3]
            if word in fv_dict:
                fv_dict[word].append([numpy.true_divide(s,numpy.linalg.norm(s,ord=2))])
                #fv_dict[word]= numpy.concatenate(  (fv_dict[word],  numpy.true_divide(s,numpy.linalg.norm(s,ord=2))) ,axis=1)
            else:
                fv_dict[word]=[numpy.true_divide(s,numpy.linalg.norm(s,ord=2))]

            print count
            if count > 10:
                break
    return fv_dict
Example no. 13
def yes(features, knowledge, smooth):
    
    #calculate probability of yes
    p_y = math.log(np.true_divide(float(knowledge["nmb_y"]), (knowledge["nmb_y"] + knowledge["nmb_n"])))
    p_f_y = p_y
    #calculate probability of no
    p_n = math.log(np.true_divide(float(knowledge["nmb_n"]), (knowledge["nmb_y"] + knowledge["nmb_n"])))
    p_f_n = p_n
    for e in features:
        #calculate p(yes|features)
        if (not knowledge["frqy"].has_key(e)) and (not knowledge["frqn"].has_key(e)):
            continue
        tmp = (0 if (not knowledge["frqy"].has_key(e)) else knowledge["frqy"][e]) + smooth
        if tmp == 0:
            p_f_y =float('-inf')
        else:
            p_f_y = p_f_y + math.log(float(tmp)/(len(knowledge["frqy"]) * smooth + knowledge["total_f_y"]))
        
        #calculate p(no|features) 
        tmp = (0 if (not knowledge["frqn"].has_key(e)) else knowledge["frqn"][e]) + smooth
        if tmp == 0:
            p_f_n =float('-inf')
        else:
            p_f_n = p_f_n + math.log(float(tmp)/(len(knowledge["frqn"]) * smooth + knowledge["total_f_n"]))

    if p_f_y == float('-inf') and p_f_n == float('-inf'):
        rlt = 'yes' if p_y > p_n else 'no'
    else:
        rlt = 'yes' if p_f_y > p_f_n else 'no'
    return rlt
Example no. 14
    def __call__(self, values, clip=None):

        if clip is None:
            clip = self.clip

        if isinstance(values, ma.MaskedArray):
            if clip:
                mask = False
            else:
                mask = values.mask
            values = values.filled(self.vmax)
        else:
            mask = False

        # Make sure scalars get broadcast to 1-d
        if np.isscalar(values):
            values = np.array([values], dtype=float)
        else:
            # copy because of in-place operations after
            values = np.array(values, copy=True, dtype=float)

        # Normalize based on vmin and vmax
        np.subtract(values, self.vmin, out=values)

        np.true_divide(values, self.vmax - self.vmin, out=values)

        # Clip to the 0 to 1 range (use the resolved local, not self.clip)
        if clip:
            values = np.clip(values, 0., 1., out=values)

        # Stretch values
        values = self.stretch(values, out=values, clip=False)

        # Convert to masked array for matplotlib
        return ma.array(values, mask=mask)
Example no. 15
def hand_to_features(hand, convolution_function, play="river"):
    # hand: list representation of n hands e.g., ['Kc','Ac','6c','7h']
    # returns the output of the filters

    diction = {"river": 7, "turn": 6, "flop": 5}
    flush_filter = numpy.ones((1, 13))
    vertical_filter = numpy.ones((4, 1))
    horizontal_filter = numpy.ones((1, 5))

    if play not in diction.keys():
        print "play must be river, turn or flop"
        quit()
    hand_matrix = string_to_vector(hand)

    column_sums = numpy.sum(hand_matrix, axis=0)
    column_sums[column_sums > 1] = 1
    column_sums = column_sums.reshape((1, 14))
    stripped_hand_matrix = numpy.delete(hand_matrix, 0, 1)

    straight_features = convolution_function(column_sums, horizontal_filter)
    straight_features_normalized = numpy.true_divide(straight_features, 5)
    two_three_four_features = convolution_function(stripped_hand_matrix, vertical_filter)
    two_three_four_features_normalized = numpy.true_divide(two_three_four_features, 4)
    flush_features = convolution_function(stripped_hand_matrix, flush_filter).T
    flush_features_normalized = numpy.true_divide(flush_features, diction[play])

    return numpy.concatenate(
        (straight_features_normalized, flush_features_normalized, two_three_four_features_normalized), axis=1
    )
Example no. 16
def thresholdColor(img):
    NTIME = time.time()

    channels = cv2.split(img)
    red = channels[2].astype(np.uint8)
    green = channels[1].astype(np.uint8)
    blue = channels[0].astype(np.uint8)

    r_g = np.true_divide(red,green)
    r_b = np.true_divide(red,blue)
    g_r = np.true_divide(green,red)
    g_b = np.true_divide(green,blue)

    
    combs = []
    for col in colors:
        (dom,minv,first,second)=color_dims[col]
        if dom == "r":
            comb = ((red > minv) & (r_g > first) & (r_b > second)).astype(np.uint8)
        elif dom == "g":
            comb = ((green > minv) & (g_r > first) & (g_b > second)).astype(np.uint8) 
        combs.append(comb)
        
    BTIME = time.time()
    rospy.loginfo("IN")
    rospy.loginfo(BTIME-NTIME)
    
    return combs
Example no. 17
def search():
    print('*' * 80)
    print('Searching: ')
    boundary_of_category = dict()
    max_p_category = np.nanmax(jll, axis=0)  # max probability in each category
    min_p_category = np.nanmin(jll, axis=0)  # min probability in each category
    for categoryid in categoryid_set:
        print('\t%s\tSearching in %s' % (datetime.now(), categoryid))
        idx = np.where(clf.classes_ == categoryid)
        tp = (y_true == categoryid) & (y_pred == categoryid)
        fp = (y_true != categoryid) & (y_pred == categoryid)
        fn = (y_true == categoryid) & (y_pred != categoryid)
        proba_tp = np.sort(max_proba[tp])
        proba_fp = np.sort(max_proba[fp])
        proba_fn = np.sort(max_proba[fn])
        threshold = np.linspace(min_p_category[idx], max_p_category[idx], 100)
        tp_num = proba_tp.shape[0] - np.searchsorted(proba_tp, threshold)
        fp_num = proba_fp.shape[0] - np.searchsorted(proba_fp, threshold)
        fn_num = proba_fn.shape[0] + np.searchsorted(proba_tp, threshold)
        precision = np.true_divide(tp_num, (tp_num + fp_num))  # tp/(tp+fp) is precision, not accuracy
        recall = np.true_divide(tp_num, (tp_num + fn_num))
        f1 = np.true_divide(2 * precision * recall, (precision + recall))
        idx_max_f1 = np.nanargmax(f1)
        boundary_of_category[categoryid] = threshold[idx_max_f1]
        y_pred[(max_proba < threshold[idx_max_f1])
               & (y_pred == categoryid)] = None
    if args.persistence:
        with codecs.open('boundary.json', encoding='utf-8', mode='w') as f:
            json.dump(obj=boundary_of_category, fp=f, ensure_ascii=False,
                      encoding='utf-8', indent=4, separators=(',', ': '))
Example no. 18
def persp(winsize, nf, dim=3):
    """
    Assumptions: 2D: Device coordinates (eye coordinates) have
    (0,0) at the top left corner. z is just normalized.
    This is because in 2D I like to pretend the screen is a big bitmap.

    3D: I am not going to bother explaining what is going on here.
        It is relatively simple but this was incredibly painful to implement
        on top of poorly written code.
        It is dull mathematics. One thing to note: If you are using your own
        geometry shader you MUST do some sort of clipping plane check
        on the output, because if you don't you will get weird artifacts around the near
        plane because of the asymptotic behavior.

    :param winsize: width, height
    :param nf: (near, far) clipping distances
    :return: device coordinates normalized to the x,y axis
    """

    w = np.true_divide(2, winsize[0])
    h = np.true_divide(2, winsize[1])
    n, f = nf
    if dim == 3:
        return np.array([[w*n, 0, 0, 0],
                [0, h*n, 0, 0],
                [0, 0, -(n+f)/float(f-n), -2*n*f/float(f-n)],
                [0, 0, -1, 0]], dtype="float32")

    else:
        return [[2*w, 0, -1],
                [0, -2*h, 1],
                [0, 0, 1]]
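A usage sketch for the 3D case:

import numpy as np

P = persp((800, 600), nf=(0.1, 100.0), dim=3)  # 4x4 perspective matrix
point = P.dot(np.array([1.0, 1.0, -5.0, 1.0], dtype="float32"))
ndc = point[:3] / point[3]  # perspective divide to normalized device coords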
Example no. 19
def plot_coding_forward(data, node):
    speeds,forwarded,coded = nodes_forwarded_coded(data, "coding", node)
    speeds =  len(data['coding']['slaves']) * numpy.array(speeds)

    # Normalize
    total = numpy.add.reduce((forwarded, numpy.array(coded)*2))
    forwarded_norm = numpy.true_divide(forwarded, total)
    coded_norm = numpy.true_divide(coded, total)
    total_norm = forwarded_norm + coded_norm

    # Create and setup a new figure
    fig = pylab.figure()
    ax = fig.add_subplot(111)
    ax.set_xlabel("Total Offered Load [kbit/s]")
    ax.set_ylabel("Packets Ratio")
    ax.set_title("Packets Forwarded/Coded")
    ax.grid(True)

    # Plot data
    ax.plot(speeds, forwarded_norm, linewidth=2, color=c['chameleon2'])
    ax.plot(speeds, coded_norm, linewidth=2, color=c['skyblue2'])
    ax.plot(speeds, total_norm, linewidth=2, color=c['scarletred2'])
    ax.legend(("Forwarded", "Coded", "Total"), loc='upper left', shadow=True)

    # Add figure to list of created figures
    figures["{}_fw_coded".format(node)] = fig
Example no. 20
def handle_data(context, data):    
    
    # get data
    d = get_data(data, context.stocks)
    if d is None:
       return
    
    prices = d[0]
    volumes = d[1]
    
    if not context.init:
        rebalance_portfolio(context, data, context.b_t)        
        context.init = True
        return

    m = context.m  # Length of b vector
    x_tilde = np.zeros(m) #x_tilde vector
    b_optimal = np.zeros(m) #b_vector
    
    #For each window, we calculate the optimal b_norm
    for k, w in enumerate(W_L):
        #Calculate predicted x for that window size
        for i, stock in enumerate(context.stocks):
            #Predicted ratio of price change from t to t+1
            if vwap:
                vol_weights = np.true_divide(volumes[W_MAX-w:,i],np.sum(volumes[W_MAX-w:,i]))
                vwa_price = np.average(prices[W_MAX-w:,i],None,vol_weights)
                x_tilde[i] = vwa_price/prices[W_MAX-1,i]
            else:
                x_tilde[i] = np.mean(prices[W_MAX-w:,i])/prices[W_MAX-1,i]
            
        #Update the b_w matrix
        context.b_w[:,k] = find_b_norm(context,x_tilde,context.b_w[:,k])
    
    length_p = len(prices[:,0])   #Length of price vector
    p_t = prices[length_p-1,:]    #Price vector today
    p_y = prices[length_p-2,:]    #Price vector yesterday
    
    #Ratio of price change (1 x m) vector from t-1 to t
    x_t = np.true_divide(p_t,p_y)
    
    #w is length of W_L
    #Daily returns (1 x w) vector
    s_d = np.dot(x_t,context.b_w)
    
    #Cumulative returns (1 x w) vector
    context.s_w = np.multiply(context.s_w,s_d)
    
    #Calculate return_weights (1 x w) vector
    return_weights = np.true_divide(context.s_w[:],np.sum(context.s_w)) #Weight according to cum. returns
    
    #Calculate b_{t+1} (n x 1) vector from (m x w) * (w x 1)
    b_optimal = np.dot(context.b_w,np.transpose(return_weights)) #Calculate the weighted portfolio
    
    rebalance_portfolio(context, data, b_optimal)
    
    # update portfolio
    context.b_t = b_optimal
Example no. 21
def run_cv(data,label,weight):

    # run cross validation on training set to get stats

    # configure weights
    wp = len(np.where(label == 1)[0])
    wd = len(np.where(label == 0)[0])
    print 'Scale pos. weight: {}'.format(3.*np.true_divide(wd,wp))

    # setup parameters for xgboost
    param = {}
    # use logistic regression loss, use raw prediction before logistic transformation
    # since we only need the rank
    param['objective'] = 'binary:logistic'
    # scale weight of positive examples
    param['scale_pos_weight'] = 3.*np.true_divide(wd,wp)
    param['eta']               = 0.05
    param['eval_metric']       = 'error'
    param['silent']            = 1
    param['nthread']           = 6
    param['min_child_weight']  = 2
    param['max_depth']         = 10
    param['gamma']             = 0.0
    param['colsample_bytree']  = 0.8
    param['subsample']         = 0.9
    param['reg_alpha']         = 1e-5

    # number of boosting rounds
    num_round = 300

    test_error    = []
    test_falsepos = []
    test_falseneg = []
    scores        = np.zeros((2,len(label)))

    # get folds
    skf = StratifiedKFold(label, 10, shuffle=True)
    for i, (train, test) in enumerate(skf):
        #print train, test
        Xtrain = data[train]
        ytrain = label[train]
        wtrain = weight[train]
        Xtest  = data[test]
        ytest  = label[test]
        wtest  = weight[test]
        # make dmatrices from xgboost
        dtrain = xgb.DMatrix( Xtrain, label=ytrain )
        dtest  = xgb.DMatrix( Xtest )
        #watchlist = [ (dtrain,'train') ]

        bst   = xgb.train(param, dtrain, num_round)
        ypred = bst.predict(dtest)
        fold_error,fold_falsepos,fold_falseneg = compute_stats(ytest,ypred)
        test_error.append(fold_error)
        test_falsepos.append(fold_falsepos)
        test_falseneg.append(fold_falseneg)
        scores[0,test] = ytest
        scores[1,test] = ypred

    return test_error,test_falsepos,test_falseneg,scores
Example no. 22
def thresholdColor(img):
    height, width, _ = img.shape
    mult = np.full((height, width), 255, dtype=np.uint8)
    channels = cv2.split(img)
    red = channels[2].astype(np.uint8)
    green = channels[1].astype(np.uint8)
    blue = channels[0].astype(np.uint8)
    r_g = np.true_divide(red,green)
    r_b = np.true_divide(red,blue)
    g_r = np.true_divide(green,red)
    g_b = np.true_divide(green,blue)

    combs = []
    for col in colors:
        (dom, minv, first, second) = col
        if dom == "r":
            zerotype = red > minv
            firsttype = r_g
            secondtype = r_b
        elif dom == "g":
            zerotype = green > minv
            firsttype = g_r
            secondtype = g_b
        zerotype = zerotype.astype(np.uint8)
        firsttype = (firsttype > first).astype(np.uint8)
        secondtype = (secondtype > second).astype(np.uint8)
        combined = cv2.bitwise_and(cv2.bitwise_and(zerotype, firsttype), secondtype)
        combined = cv2.multiply(combined, mult)  # scale the 0/1 mask to 0/255
        combs.append(combined)
    return combs
Example no. 23
def get_clipped_resized_masks(boxes, input_masks):
  target_size = cfg.MASK_SIZE
  num_boxes = boxes.shape[0]
  num_channels = input_masks.shape[1]
  masks = np.zeros((num_boxes, num_channels, target_size, target_size))
  yv = np.arange(target_size).reshape(-1,1)
  xv = np.arange(target_size).reshape(1,-1)
  yv = np.true_divide(yv, float(target_size)-1.)
  xv = np.true_divide(xv, float(target_size)-1.)
  
  for k in range(num_boxes):
    box = boxes[k,:]
    xmin = box[0]
    ymin = box[1]
    w = box[2]-box[0]
    h = box[3]-box[1]
    xv1 = np.round(xmin + xv*w).astype(int)
    yv1 = np.round(ymin + yv*h).astype(int)
    for l in range(num_channels):
      M = input_masks[k,l,yv1, xv1]
      masks[k,l,:,:]=M
  return masks
Example no. 24
 def __init__(self, n=10, M=0.1):
     T2_StSI.__init__(self, n, M)
     [A, B, a, alpha, beta, E, AplusB, AminusB] = \
     T2_StSI.compute_constants(self, n, M)
     [self.a, self.alpha, self.beta] = [a, alpha, beta]
     self.gamma = np.true_divide(M, n-1)
     self.c = np.true_divide(self.gamma, beta-alpha)
Example no. 25
def this_create_total_dict(fromPercent, toPercent):
    input_dir="/tmp/fvs_fun/fv_deep_funneled/"
    total = len(os.listdir(input_dir))
    start_index = int(fromPercent*total)
    end_index =  int(toPercent*total)
    fv_dict = dict()
    count = 0
    
    for name in os.listdir(input_dir)[start_index:end_index]:
        count=count+1
        input_file = os.path.join(input_dir,name)
        
        with open(input_file) as f:
            s = numpy.genfromtxt(f, delimiter=',')
            word1 = "".join(re.findall("[a-zA-Z]+", name))
            word = word1[:-3]
            if word in fv_dict:
                fv_dict[word].append([numpy.true_divide(s,numpy.linalg.norm(s,ord=2))])
                #fv_dict[word]= numpy.concatenate(  (fv_dict[word],  numpy.true_divide(s,numpy.linalg.norm(s,ord=2))) ,axis=1)
            else:
                fv_dict[word]=[numpy.true_divide(s,numpy.linalg.norm(s,ord=2))]
            print count
    return fv_dict
Example no. 26
def normalize(ndarray_list):
    """ Normalize each ndarray and scale them all.
    > All ndarrays must have the same dimensions!

    Parameters:
    - ndarray_list : list of image ndarrays

    Returns:
    - the list of ndarrays, normalized and scaled to a common mean

    """
    liste = []
    length = len(ndarray_list)
    print "Number of images to process: " + str(length)
    # 1) Normalize each flat field, i.e. divide it by its mean entry value
    for i in range(length):
        print "Normalize 1/2: " + str(i+1) + "/" + str(length)
        mean = np.mean(ndarray_list[i])  # Find the mean of the ndarray
        print "Mean of image " + str(i+1) + ": " + str(mean)
        liste.append(mean)
        ndarray_list[i] = np.true_divide(ndarray_list[i], mean)  # Divide the ndarray by its mean, normalizing it
    # 2) Scale all of the fields' means so that their individual averages are equal to one another
    meanofmean = sum(liste) / len(liste)  # Mean over the whole set of ndarray_list
    print "Global mean over all images: " + str(meanofmean)
    for i in range(length):
        print "Normalize 2/2: " + str(i+1) + "/" + str(length)
        ndarray_list[i] = np.multiply(ndarray_list[i], np.true_divide(meanofmean, np.mean(ndarray_list[i])))
    return ndarray_list
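A usage sketch with two synthetic flat fields; after the call, every array in the list has the same mean:

import numpy as np

flats = [np.random.rand(4, 4) * s for s in (10.0, 20.0)]  # synthetic flat fields
flats = normalize(flats)
print([np.mean(f) for f in flats])  # all means are now equal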
Example no. 27
def compare_cdf_g(model, n_obs=10000, case='T2s',
                  t_vector=np.arange(0, 100, 0.1), path2ms='./'):
    # Do a graphical comparison by plotting the theoretical cdf and the
    # empirical cdf
    T_list_ms = np.true_divide(model.T_list,2)
    M_list = model.M_list
    if case == 'T2s':
        cmd = create_ms_command_T2(n_obs, model.n, T_list_ms, M_list, 'same')
        theor_cdf = model.cdf_T2s
    elif case == 'T2d':
        cmd = create_ms_command_T2(n_obs, model.n, T_list_ms, M_list, 'disctint')
        theor_cdf = model.cdf_T2d
    else:
        return 1
    
    obs = simulate_T2_ms(cmd, path2ms)
    obs = np.array(obs) * 2
    
    delta = t_vector[-1] - t_vector[-2]
    bins = t_vector
    
    f_obs = np.histogram(obs, bins=bins)[0]
    cum_f_obs = [0] + list(f_obs.cumsum())
    F_obs = np.true_divide(np.array(cum_f_obs), n_obs)
    F_theory = [theor_cdf(t) for t in bins]    
    
    # Now we plot
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(bins, F_obs, '-r', label = 'Empirical cdf')
    ax.plot(bins, F_theory, '-b', label = 'Theoretical cdf')
    
    plt.legend()
    plt.show()
Example no. 28
def make_bdt(data,label,weight):

    # create and save tree model

    # configure weights
    wp = len(np.where(label == 1)[0])
    wd = len(np.where(label == 0)[0])
    print 'Scale pos. weight: {}'.format(3.*np.true_divide(wd,wp))

    # setup parameters for xgboost
    param = {}
    # use logistic regression loss, use raw prediction before logistic transformation
    # since we only need the rank
    param['objective'] = 'binary:logistic'
    # scale weight of positive examples
    param['scale_pos_weight'] = 3.*np.true_divide(wd,wp)
    param['eta']               = 0.05
    param['eval_metric']       = 'error'
    param['silent']            = 1
    param['nthread']           = 6
    param['min_child_weight']  = 2
    param['max_depth']         = 10
    param['gamma']             = 0.0
    param['colsample_bytree']  = 0.8
    param['subsample']         = 0.9
    param['reg_alpha']         = 1e-5

    # number of boosting rounds
    num_round = 300

    # make dmatrices from xgboost
    dtrain = xgb.DMatrix( data, label=label )
    bst    = xgb.train(param, dtrain, num_round)
        
    return bst
Example no. 29
def sigmoid_inverse(z):
    assert(np.max(z) <= 1.0 and np.min(z) >= 0.0)
    z = 0.998*z + 0.001  # squash away from 0 and 1 to keep the log finite
    return np.log(np.true_divide(
        1.0, (np.true_divide(1.0, z) - 1)
    ))
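A round-trip check (the 0.998 squashing keeps the log finite at z = 0 and z = 1, at the cost of a small bias):

import numpy as np

z = np.true_divide(1.0, 1.0 + np.exp(-np.array([-2.0, 0.0, 2.0])))  # sigmoid
print(sigmoid_inverse(z))  # roughly [-2, 0, 2], up to the 0.998 squashing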
Example no. 30
    def general_work(self, input_items, output_items):
        in0 = input_items[0]
        flags = input_items[1]
        out = output_items[0]
        if len(in0) != len(flags):
            print "TX: input buffer lengths DON'T MATCH"
        len_in = min(len(in0), len(flags))
        if self.state == 0:
            for i in range(len_in):
                if flags[i] == 1:
                    print "TX: FLAG FOUND!!!", i
                    self.state = 1
                    self.consume(0, i+1)
                    self.consume(1, i+1)
                    return 0
        if self.state == 1:
            Y = in0[-self.pilot_length:]  # last pilot_length samples
            corr = np.true_divide(np.dot(Y.transpose(), self.pilot_seq.conj()), self.pilot_length)
            A = np.dot(corr, corr.transpose().conj())
            B = np.true_divide(np.dot(Y.transpose(), Y.conj()), self.pilot_length)
            Omega_hat = B - A
            Omega = Omega_hat - self.NHAT + np.identity(self.nt)
            Sigma = np.dot(np.linalg.pinv(Omega) - np.linalg.pinv(B), self.weight)  # nt x nt
            print "Pilot detected! TX sync Sigma ="
            print Sigma
            out[:] = Sigma.reshape(self.nt * self.nt)
            self.consume(0, len_in)
            self.consume(1, len_in)
            return 1
Example no. 31
def div0( a, b ):
    """ ignore / 0, div0( [-1, 0, 1], 0 ) -> [0, 0, 0] """
    with np.errstate(divide='ignore', invalid='ignore'):
        c = np.true_divide( a, b )
        c[ ~ np.isfinite( c )] = 0  # -inf inf NaN
    return c
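Usage, matching the docstring:

import numpy as np

print(div0(np.array([-1.0, 0.0, 1.0]), 0))               # -> [0. 0. 0.]
print(div0(np.array([1.0, 2.0]), np.array([2.0, 0.0])))  # -> [0.5 0. ]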
Example no. 32
def run_analysis(opts):

    # Set up paths:
    traceid = '%s_s2p' % (opts.traceid)
    acquisition_dir = os.path.join(opts.rootdir, opts.animalid, opts.session, opts.acquisition)
    traceid_dir = os.path.join(acquisition_dir, opts.retino_run, 'traces', traceid)

    file_dir = os.path.join(traceid_dir,'retino_analysis','files')
    run_dir = traceid_dir.split('/traces')[0]
    trace_arrays_dir = os.path.join(traceid_dir,'files')


    #Output paths
    fig_base_dir = os.path.join(traceid_dir,'retino_analysis','figures')
    if not os.path.exists(fig_base_dir):
        os.makedirs(fig_base_dir)
    file_out_dir = os.path.join(traceid_dir,'retino_analysis','files')
    if not os.path.exists(file_out_dir):
        os.makedirs(file_out_dir)


    # Get associated RUN info:
    runmeta_path = os.path.join(run_dir, '%s.json' % opts.retino_run)
    with open(runmeta_path, 'r') as r:
        runinfo = json.load(r)

    nslices = len(runinfo['slices'])
    nchannels = runinfo['nchannels']
    nvolumes = runinfo['nvolumes']
    ntiffs = runinfo['ntiffs']
    frame_rate = runinfo['frame_rate']

    #-----Get info from paradigm file
    para_file_dir = os.path.join(run_dir,'paradigm','files')
    if not os.path.exists(para_file_dir): os.makedirs(para_file_dir)
    para_files =  [f for f in os.listdir(para_file_dir) if f.endswith('.json')]#assuming a single file for all tiffs in run
    if len(para_files) == 0:
        # Paradigm info not extracted yet:
        raw_para_files = [f for f in glob.glob(os.path.join(run_dir, 'raw*', 'paradigm_files', '*.mwk')) if not f.startswith('.')]
        print run_dir
        assert len(raw_para_files) == 1, "No raw .mwk file found, and no processed .mwk file found. Aborting!"
        raw_para_file = raw_para_files[0]           
        print "Extracting .mwk trials: %s" % raw_para_file 
        fn_base = os.path.split(raw_para_file)[1][:-4]
        trials = mw.extract_trials(raw_para_file, retinobar=True, trigger_varname='frame_trigger', verbose=True)
        para_fpath = mw.save_trials(trials, para_file_dir, fn_base)
        para_file = os.path.split(para_fpath)[-1]
    else:
        assert len(para_files) == 1, "Unable to find unique .mwk file..."
        para_file = para_files[0]

    print 'Getting paradigm file info from %s'%(os.path.join(para_file_dir, para_file))

    with open(os.path.join(para_file_dir, para_file), 'r') as r:
        parainfo = json.load(r)


    #get masks
    curr_slice = 'Slice01'#hard-coding planar data for now

    masks_fn = os.path.join(file_out_dir,'masks.hdf5')
    mask_file = h5py.File(masks_fn, 'r')

    iscell = np.array(mask_file[curr_slice]['iscell'])
    mask_array = np.array(mask_file[curr_slice]['mask_array'])
    mask_file.close()

    #fid = 1
    for fid in range(1,ntiffs+1):
        trace_file = [f for f in os.listdir(trace_arrays_dir) if 'File%03d'%(fid) in f and f.endswith('hdf5')][0]
        trace_fn = os.path.join(trace_arrays_dir,trace_file)
        print(trace_fn)

        rawfile = h5py.File(trace_fn, 'r')

        frametimes = np.array(rawfile[curr_slice]['frames_tsec'])
        roi_trace = np.transpose(rawfile[curr_slice]['traces']['pixel_value']['cell'])
        rawfile.close()

        stimulus = parainfo[str(fid)]['stimuli']['stimulus']
        stimfreq = parainfo[str(fid)]['stimuli']['scale']

        #make figure directory for stimulus type
        fig_out_dir = os.path.join(fig_base_dir, stimulus)
        if not os.path.exists(fig_out_dir):
            os.makedirs(fig_out_dir)



        #Get fft  
        print('Getting fft....')
        fourier_data = np.fft.fft(roi_trace)


        nrois,nframes = roi_trace.shape



        #Get magnitude and phase data
        print('Analyzing phase and magnitude....')
        mag_data=abs(fourier_data)
        phase_data=np.angle(fourier_data)

        #label frequency bins
        freqs = np.fft.fftfreq(nframes, 1.0 / frame_rate)  # avoid integer division
        idx = np.argsort(freqs)
        freqs=freqs[idx]

        #sort magnitude and phase data
        mag_data=mag_data[:,idx]
        phase_data=phase_data[:,idx]

        #exclude the DC offset from the data (integer index, not np.round's float)
        freqs = freqs[nframes // 2 + 1:]
        mag_data = mag_data[:, nframes // 2 + 1:]
        phase_data = phase_data[:, nframes // 2 + 1:]

        freq_idx=np.argmin(np.absolute(freqs-stimfreq))#find out index of stimulation freq
        top_freq_idx=np.where(freqs>1)[0][0]#find out index of 1Hz, to cut-off zoomed out plot
        max_mod_idx=np.argmax(mag_data[:,freq_idx],0)#best pixel index

        #unpack values from frequency analysis
        mag_array = mag_data[:,freq_idx]                    
        phase_array = phase_data[:,freq_idx]      

        #get magnitude ratio
        tmp = np.copy(mag_data)
        tmp = np.delete(tmp, freq_idx, 1)  # np.delete returns a new array; it is not in-place
        nontarget_mag_array = np.sum(tmp, 1)
        mag_ratio_array = mag_array / nontarget_mag_array

        #bootstrap to get null-distribution for mag ratio
        nreps = 1000
        zscore_bootstrap = np.empty((nrois,))
        ratio_array = np.empty((nreps,))
        #ridx = 76
        for ridx in range(nrois):
            for rep in range(nreps):

                roi_mag = mag_data[ridx,:]
                shuffle_mag = np.random.permutation(roi_mag)
                mag_value = shuffle_mag[freq_idx]  
                tmp = np.copy(shuffle_mag)
                tmp = np.delete(tmp, freq_idx, 0)  # np.delete returns a new array; it is not in-place
                nontarget_mag_value = np.sum(tmp, 0)
                ratio_array[rep] = mag_value/nontarget_mag_value
                
            zscore_bootstrap[ridx] = (mag_ratio_array[ridx] - np.mean(ratio_array))/np.std(ratio_array)


        #do regression, get some info from fit
        t=frametimes*(2*np.pi)*stimfreq
        phi=np.expand_dims(phase_array,1)
        varexp_array, beta_array, signal_fit = do_regression(t,phi,roi_trace,roi_trace.shape[0],roi_trace.shape[1])

        print('Saving data to file')
        file_grp = h5py.File(os.path.join(file_out_dir,'File%03d_retino_data.hdf5'%(fid)),  'w')

        #file_grp.attrs['frame_rate'] = frame_rate
        file_grp.attrs['stimfreq'] = stimfreq



        #save data values to structure
        if 'mag_array' not in file_grp.keys():
            magset = file_grp.create_dataset('/'.join([curr_slice,'mag_array']),mag_array.shape, mag_array.dtype)
            magset[...] = mag_array
        if 'phase_array' not in file_grp.keys():
            phaseset = file_grp.create_dataset('/'.join([curr_slice,'phase_array']),phase_array.shape, phase_array.dtype)
            phaseset[...] = phase_array
        if 'mag_ratio_array' not in file_grp.keys():
            ratioset = file_grp.create_dataset('/'.join([curr_slice,'mag_ratio_array']),mag_ratio_array.shape, mag_ratio_array.dtype)
            ratioset[...] = mag_ratio_array
        if 'ratio_bootstrap_zscore' not in file_grp.keys():
            bstrapset = file_grp.create_dataset('/'.join([curr_slice,'ratio_bootstrap_zscore']),zscore_bootstrap.shape, zscore_bootstrap.dtype)
            bstrapset[...] = zscore_bootstrap
        if 'beta_array' not in file_grp.keys():
            betaset = file_grp.create_dataset('/'.join([curr_slice,'beta_array']),beta_array.shape, beta_array.dtype)
            betaset[...] = beta_array
        if 'var_exp_array' not in file_grp.keys():
            varset = file_grp.create_dataset('/'.join([curr_slice,'var_exp_array']),varexp_array.shape, varexp_array.dtype)
            varset[...] = varexp_array

        # Add fit signal to retino output:
        if 'signal_fit' not in file_grp.keys():
            fitset = file_grp.create_dataset('/'.join([curr_slice,'signal_fit']), signal_fit.shape, signal_fit.dtype)
            fitset[...] = signal_fit

        if 'masks' not in file_grp.keys():
            mset = file_grp.create_dataset('/'.join([curr_slice,'masks']),mask_array.shape, mask_array.dtype)
            mset[...] = mask_array
        file_grp.close()


        #VISUALIZE!!!
        print('Visualizing results')
        print('Output folder: %s'%(fig_out_dir))
        #visualize pixel-based result

        #make figure directory for stimulus type
        fig_dir = os.path.join(fig_out_dir, 'File%03d_%s' % (fid, curr_slice),'spectrum')
        if not os.path.exists(fig_dir):
            os.makedirs(fig_dir)

        for midx in range(nrois):
            fig_name = 'full_spectrum_mask%04d.png' %(midx)
            fig=plt.figure()
            plt.plot(freqs,mag_data[midx,:])
            plt.xlabel('Frequency (Hz)',fontsize=16)
            plt.ylabel('Magnitude',fontsize=16)
            axes = plt.gca()
            ymin, ymax = axes.get_ylim()
            plt.axvline(x=freqs[freq_idx], linewidth=1, color='r')
            plt.savefig(os.path.join(fig_dir,fig_name))
            plt.close()

        for midx in range(nrois):
            fig_name = 'zoom_spectrum_mask%04d.png' %(midx)
            fig=plt.figure()
            plt.plot(freqs[0:top_freq_idx],mag_data[midx,0:top_freq_idx])
            plt.xlabel('Frequency (Hz)',fontsize=16)
            plt.ylabel('Magnitude',fontsize=16)
            axes = plt.gca()
            ymin, ymax = axes.get_ylim()
            plt.axvline(x=freqs[freq_idx], linewidth=1, color='r')
            plt.savefig(os.path.join(fig_dir,fig_name))
            plt.close()


        fig_dir = os.path.join(fig_out_dir, 'File%03d_%s' % (fid, curr_slice),'timecourse')
        if not os.path.exists(fig_dir):
            os.makedirs(fig_dir)

        stimperiod_t=np.true_divide(1,stimfreq)
        stimperiod_frames=stimperiod_t*frame_rate
        periodstartframes=np.round(np.arange(0,len(frametimes),stimperiod_frames))[:]
        periodstartframes = periodstartframes.astype('int')

        for midx in range(nrois):
            fig_name = 'timecourse_fit_mask%04d.png' %(midx)
            fig=plt.figure()
            plt.plot(frametimes,roi_trace[midx,:],'b')
            plt.plot(frametimes,signal_fit[midx,:],'r')
            plt.xlabel('Time (s)',fontsize=16)
            plt.ylabel('Pixel Value',fontsize=16)
            axes = plt.gca()
            ymin, ymax = axes.get_ylim()
            for f in periodstartframes:
                    plt.axvline(x=frametimes[f], linewidth=1, color='k')
            axes.set_xlim([frametimes[0],frametimes[-1]])
            plt.savefig(os.path.join(fig_dir,fig_name))
            plt.close()


        # #Read in average image (for viuslization)
        masks_fn = os.path.join(file_out_dir,'masks.hdf5')
        mask_file = h5py.File(masks_fn, 'r')

        im0 = np.array(mask_file[curr_slice]['meanImg'])
        mask_file.close()
        szx,szy = im0.shape



        im1 = np.uint8(np.true_divide(im0,np.max(im0))*255)
        im2 = np.dstack((im1,im1,im1))

        #set phase map range for visualization
        phase_array_disp=np.copy(phase_array)
        phase_array_disp[phase_array<0]=-phase_array[phase_array<0]
        phase_array_disp[phase_array>0]=(2*np.pi)-phase_array[phase_array>0]



        #mark rois
        magratio_roi = np.empty((szy,szx))
        magratio_roi[:] = np.nan

        mag_roi = np.copy(magratio_roi)
        varexp_roi = np.copy(magratio_roi)
        phase_roi = np.copy(magratio_roi)

        for midx in range(nrois):
            if iscell[midx]:
                maskpix = np.where(np.squeeze(mask_array[midx,:,:]))
                #print(len(maskpix))
                magratio_roi[maskpix]=mag_ratio_array[midx]
                mag_roi[maskpix]=mag_array[midx]
                varexp_roi[maskpix]=varexp_array[midx]
                phase_roi[maskpix]=phase_array_disp[midx]

        fig_dir = fig_out_dir
        data_str = 'File%03d_%s'%(fid,curr_slice)

        sns.set_style("darkgrid", {'axes.grid' : True})

        fig_name = 'phase_mag_ratio_joint_%s.png' % data_str 
        p = sns.jointplot(phase_array_disp,mag_ratio_array,xlim = (0,2*np.pi))
        p.set_axis_labels(xlabel='Phase', ylabel='Mag Ratio',fontsize = 15)
        p.savefig(os.path.join(fig_dir,fig_name))
        plt.close()

        sns.set_style("whitegrid", {'axes.grid' : False})


        fig_name = 'phase_info_%s.png' % data_str 
        fig=plt.figure()
        plt.imshow(im2,'gray')
        plt.imshow(phase_roi,'nipy_spectral',alpha = 0.5,vmin=0,vmax=2*np.pi)
        plt.colorbar()
        plt.savefig(os.path.join(fig_dir,fig_name))
        plt.close()


        fig_name = 'mag_info_%s.png' % data_str 
        fig=plt.figure()
        plt.imshow(im2,'gray')
        plt.imshow(mag_roi, alpha = 0.5)
        plt.colorbar()
        plt.savefig(os.path.join(fig_dir,fig_name))
        plt.close()

        fig_name = 'mag_ratio_info_%s.png' % data_str 
        fig=plt.figure()
        plt.imshow(im2,'gray')
        plt.imshow(magratio_roi, alpha = 0.5)
        plt.colorbar()
        plt.savefig(os.path.join(fig_dir,fig_name))
        plt.close()

        fig_name = 'varexp_info_%s.png' % data_str 
        fig=plt.figure()
        plt.imshow(im2,'gray')
        plt.imshow(varexp_roi, alpha = 0.5)
        plt.colorbar()
        plt.savefig(os.path.join(fig_dir,fig_name))
        plt.close()

        fig_name = 'phase_nice_%s.png' % data_str #curr_file #(tiff_fn[:-4])
        dpi = 80
        szY,szX = im1.shape
        # What size does the figure need to be in inches to fit the image?
        figsize = szX / float(dpi), szY / float(dpi)
        # Create a figure of the right size with one axes that takes up the full figure
        fig = plt.figure(figsize=figsize)
        ax = fig.add_axes([0, 0, 1, 1])
        # Hide spines, ticks, etc.
        ax.axis('off')
        ax.imshow(im2,'gray')
        ax.imshow(phase_roi,'nipy_spectral',alpha = 0.5,vmin=0,vmax=2*np.pi)
        fig.savefig(os.path.join(fig_dir,fig_name), dpi=dpi, transparent=True)
        plt.close()

        std_thresh = .008

        phase_roi_thresh = np.copy(phase_roi)
        phase_roi_thresh[magratio_roi<std_thresh] = np.nan
        fig_name = 'phase_nice_thresh_%s.png' % data_str #curr_file #(tiff_fn[:-4])
        dpi = 80
        szY,szX = im1.shape
        # What size does the figure need to be in inches to fit the image?
        figsize = szX / float(dpi), szY / float(dpi)
        # Create a figure of the right size with one axes that takes up the full figure
        fig = plt.figure(figsize=figsize)
        ax = fig.add_axes([0, 0, 1, 1])
        # Hide spines, ticks, etc.
        ax.axis('off')
        ax.imshow(im2,'gray')
        ax.imshow(phase_roi_thresh,'nipy_spectral',alpha = 0.5,vmin=0,vmax=2*np.pi)
        fig.savefig(os.path.join(fig_dir,fig_name), dpi=dpi, transparent=True)
        plt.close()

        #correct orientation
        phase_roi_corr = np.transpose(phase_roi)
        phase_roi_corr = np.flip(phase_roi_corr,0)
        phase_roi_corr = np.flip(phase_roi_corr,1)

        im1_corr = np.transpose(im1)
        im1_corr = np.flip(im1_corr,0)
        im1_corr = np.flip(im1_corr,1)

        im2_corr = np.dstack((im1_corr,im1_corr,im1_corr))




        fig_name = 'phase_nice_corrected_%s.png' % data_str #curr_file #(tiff_fn[:-4])
        dpi = 80
        szY,szX = im1.shape
        # What size does the figure need to be in inches to fit the image?
        figsize = szX / float(dpi), szY / float(dpi)
        # Create a figure of the right size with one axes that takes up the full figure
        fig = plt.figure(figsize=figsize)
        ax = fig.add_axes([0, 0, 1, 1])
        # Hide spines, ticks, etc.
        ax.axis('off')
        ax.imshow(im2_corr,'gray')
        ax.imshow(phase_roi_corr,'nipy_spectral',alpha = 0.5,vmin=0,vmax=2*np.pi)
        fig.savefig(os.path.join(fig_dir,fig_name), dpi=dpi, transparent=True)
        plt.close()

        phase_roi_corr_thresh = np.transpose(phase_roi_thresh)
        phase_roi_corr_thresh = np.flip(phase_roi_corr_thresh,0)
        phase_roi_corr_thresh = np.flip(phase_roi_corr_thresh,1)

        fig_name = 'phase_nice_corrected_thresh_%s.png' % data_str #curr_file #(tiff_fn[:-4])
        dpi = 80
        szY,szX = im1.shape
        # What size does the figure need to be in inches to fit the image?
        figsize = szX / float(dpi), szY / float(dpi)
        # Create a figure of the right size with one axes that takes up the full figure
        fig = plt.figure(figsize=figsize)
        ax = fig.add_axes([0, 0, 1, 1])
        # Hide spines, ticks, etc.
        ax.axis('off')
        ax.imshow(im2_corr,'gray')
        ax.imshow(phase_roi_corr_thresh,'nipy_spectral',alpha = 0.5,vmin=0,vmax=2*np.pi)
        fig.savefig(os.path.join(fig_dir,fig_name), dpi=dpi, transparent=True)
        plt.close()
Example no. 33
def nnff(nn, x, y):
    #NNFF performs a feed forward pass
    # nn = nnff(nn,x,y) returns a neural network structure with updated layer activations,
    # error and loss (nn.a,nn.e,nn.L)

    n = nn.N
    m = x.shape[0]
    temp = np.ones([m, x.shape[1] + 1], dtype=np.float64)
    temp[:, 1:] = x
    #temp[:,1:] = np.copy(x)

    # Allocate space for activation, error and loss.
    # Matlab structs can grow cells/arrays on the fly; Python classes cannot,
    # so the members are declared up front and then used.
    nn.A[0] = np.copy(temp)

    # Feedforward pass
    for i in range(1, n - 1):
        if nn.ActivationFunction == 'sigm':
            #Calculate the unit's outputs (including the unit's bias term)
            r = np.dot(nn.A[i - 1], nn.W[i - 1].T)
            nn.A[i] = sigm.sigm(r)
        elif nn.ActivationFunction == 'tanh_opt':
            r = np.dot(nn.A[i - 1], nn.W[i - 1].T)
            nn.A[i] = tanh_opt.tanh_opt(r)

        # Dropout
        if nn.DropoutFraction > 0:
            if nn.Testing == True:
                nn.A[i] = np.multiply(nn.A[i], (1 - nn.DropoutFraction))
            else:
                nn.DropOutMask[i] = np.random.uniform(1, 0, nn.A[i].shape)
                # Get the indices and convert them to floats
                indices = [nn.DropOutMask[i] > nn.DropoutFraction]
                tempMask = np.where(indices, 1, 0).astype(
                    np.float64)  # convert binary to float
                nn.DropOutMask[i] = tempMask.squeeze(
                )  # Remove the extra unwanted dimension
                nn.A[i] = np.multiply(nn.A[i], nn.DropOutMask[i])
                nn.A[i][
                    nn.A[i] ==
                    0.] = 0  # Remove the negative zeros that propagate to cause large errors in testing phase

        # Calculate running exponential activations for use with sparsity

        if nn.NonSparsityPenalty > 0:
            pass

        #Add bias term (shape entries must be ints)
        biasTerm = np.ones(shape=(nn.A[i].shape[0], nn.A[i].shape[1] + 1),
                           dtype=np.float64)
        biasTerm[:, 1:] = nn.A[i]
        nn.A[i] = biasTerm

    if nn.Output == 'sigm':
        nn.A[n - 1] = sigm.sigm(np.dot(nn.A[n - 2], nn.W[n - 2].T))
    elif nn.Output == 'linear':
        nn.A[n - 1] = np.dot(nn.A[n - 2], nn.W[n - 2].T)
    elif nn.Output == 'softmax':
        nn.A[n - 1] = np.dot(nn.A[n - 2], nn.W[n - 2].T)
        maxVector = nn.A[n - 1].max(axis=1)  # row-wise max for the numerical-stability shift
        maxVector = maxVector.reshape(maxVector.shape[0],
                                      1)  # Convert to column vector Mx1
        nn.A[n - 1] = np.exp(np.subtract(nn.A[n - 1], maxVector))
        sumVector = np.sum(nn.A[n - 1], 1)
        sumVector = sumVector.reshape(sumVector.shape[0],
                                      1)  # Convert to column vector Mx1
        nn.A[n - 1] = np.true_divide(nn.A[n - 1], sumVector)

    # Error and Loss
    nn.E = y - nn.A[n - 1]

    if nn.Output == 'sigm' or nn.Output == 'linear':
        nn.L = 0.5 * np.sum(np.power(nn.E, 2)) / m
    elif nn.Output == "softmax":
        # Try removing values that are too small. This helps countering the invalid value encountered in log warning
        eps = 1e-50
        nn.A[n - 1][nn.A[n - 1] < eps] = eps
        nn.L = -1.0 * np.sum(np.multiply(y, np.log(nn.A[n - 1]))) / m
    return nn
Example no. 34
def gini_index(X, y):
    """
    This function implements the gini index feature selection.

    Input
    ----------
    X: {numpy array}, shape (n_samples, n_features)
        input data
    y: {numpy array}, shape (n_samples,)
        input class labels

    Output
    ----------
    gini: {numpy array}, shape (n_features, )
        gini index value of each feature
    """

    n_samples, n_features = X.shape

    # initialize gini_index for all features to be 0.5
    gini = np.ones(n_features) * 0.5

    # For i-th feature we define fi = x[:,i] ,v include all unique values in fi
    for i in range(n_features):
        v = np.unique(X[:, i])
        for j in range(len(v)):
            # left_y contains labels of instances whose i-th feature value is less than or equal to v[j]
            left_y = y[X[:, i] <= v[j]]
            # right_y contains labels of instances whose i-th feature value is larger than v[j]
            right_y = y[X[:, i] > v[j]]

            # gini_left is sum of square of probability of occurrence of v[i] in left_y
            # gini_right is sum of square of probability of occurrence of v[i] in right_y
            gini_left = 0
            gini_right = 0

            for k in range(np.min(y), np.max(y) + 1):
                if len(left_y) != 0:
                    # t1_left is probability of occurrence of k in left_y
                    t1_left = np.true_divide(len(left_y[left_y == k]),
                                             len(left_y))
                    t2_left = np.power(t1_left, 2)
                    gini_left += t2_left

                if len(right_y) != 0:
                    # t1_right is probability of occurrence of k in right_y
                    t1_right = np.true_divide(len(right_y[right_y == k]),
                                              len(right_y))
                    t2_right = np.power(t1_right, 2)
                    gini_right += t2_right

            gini_left = 1 - gini_left
            gini_right = 1 - gini_right

            # sum of gini_left and gini_right, weighted by subset sizes
            t1_gini = (len(left_y) * gini_left + len(right_y) * gini_right)

            # compute the gini_index for the i-th feature
            value = np.true_divide(t1_gini, len(y))

            if value < gini[i]:
                gini[i] = value
    return gini
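
A minimal usage sketch on hypothetical toy data; the first feature admits a perfect split, so its score drops to 0:

import numpy as np

X = np.array([[1, 7], [2, 5], [3, 6], [4, 4]])
y = np.array([0, 0, 1, 1])
print(gini_index(X, y))  # e.g. [0.  0.333...]; lower means a cleaner split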
Exemplo n.º 35
0
def percentile(a, q, *args, **kwargs):
    q = np.true_divide(q, 100)
    return quantile(a, q, *args, **kwargs)
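
This is the same convention numpy itself follows; assuming the local quantile matches numpy's semantics, the rescaling is exact:

import numpy as np

a = np.arange(5)
assert np.percentile(a, 50) == np.quantile(a, np.true_divide(50, 100)) == 2.0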
Exemplo n.º 36
0
def interpolate(w, beta):
    return (
        (1 - beta) *
        (np.true_divide(np.linalg.norm(w, ord=1), w.shape[1]))) + (beta * w)
Exemplo n.º 37
0
def flatten_normal_map(normal_map, visualize=False):

	sigma = 64.0

	normal_map_mask = np.isnan(normal_map)
	normal_map_smoothed = normal_map.copy()
	normal_map_smoothed[normal_map_mask] = 0.0
	normal_map_smoothed = skimage.filters.gaussian(normal_map_smoothed, sigma=sigma, multichannel=True)
	normal_map_smoothed_nans = skimage.filters.gaussian((~normal_map_mask).astype(float), sigma=sigma, multichannel=True)
	
	normal_map_smoothed = np.true_divide(
		normal_map_smoothed,
		normal_map_smoothed_nans,
		out=np.zeros_like(normal_map_smoothed),
		where=normal_map_smoothed_nans != 0.0
	)

	norms = np.zeros((normal_map_smoothed.shape[0], normal_map_smoothed.shape[1], 3))
	norms[:,:,0] = np.linalg.norm(normal_map_smoothed, axis=2)
	norms[:,:,1] = norms[:,:,0]
	norms[:,:,2] = norms[:,:,0]
	normal_map_smoothed = np.true_divide(
		normal_map_smoothed,
		norms,
		out=np.full_like(normal_map_smoothed, np.nan),
		where=norms != 0.0
	)

	flat_normal_map = normal_map + (np.array([0.0, 0.0, 1.0]) - normal_map_smoothed)
	norms = np.zeros((flat_normal_map.shape[0], flat_normal_map.shape[1], 3))
	norms[:,:,0] = np.linalg.norm(flat_normal_map, axis=2)
	norms[:,:,1] = norms[:,:,0]
	norms[:,:,2] = norms[:,:,0]
	flat_normal_map = np.true_divide(
		flat_normal_map,
		norms,
		out=np.zeros_like(flat_normal_map),
		where=norms != 0.0
	)

	nm_min = -1.0
	nm_max = 1.0
	normal_map_scaled = (normal_map - nm_min) / (nm_max - nm_min)
	normal_map_smoothed_scaled = (normal_map_smoothed - nm_min) / (nm_max - nm_min)
	flat_normal_map_scaled = (flat_normal_map - nm_min) / (nm_max - nm_min)

	if visualize:

		fig = plt.figure(figsize=(16,16))

		ax = fig.add_subplot(2, 2, 1)
		ax.grid(False)
		plt.imshow(normal_map_scaled, origin='upper')
		plt.title('Normal Map')

		ax = fig.add_subplot(2, 2, 2)
		ax.grid(False)
		plt.imshow(normal_map_smoothed_scaled, origin='upper')
		plt.title('Normal Map Smoothed')

		ax = fig.add_subplot(2, 2, 3)
		ax.grid(False)
		plt.imshow(flat_normal_map_scaled, origin='upper')
		plt.title('Flat Normal Map')
	
		plt.show()

	return flat_normal_map
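
The divide-by-smoothed-mask step above is the normalized-convolution trick for blurring data with holes. A minimal single-channel sketch of the same idea (nan_gaussian is a hypothetical helper name):

import numpy as np
import skimage.filters

def nan_gaussian(img, sigma):
    mask = np.isnan(img)
    blurred = skimage.filters.gaussian(np.where(mask, 0.0, img), sigma=sigma)
    weights = skimage.filters.gaussian((~mask).astype(float), sigma=sigma)
    # Where no valid pixels contributed, weights is 0 and the output stays 0.
    return np.true_divide(blurred, weights,
                          out=np.zeros_like(blurred), where=weights != 0)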
Exemplo n.º 38
0
##print len(four[0]),my_data[:,0][:len(four)]
#print len(my_data)
import locale
additionlist = []
for ff in range(0, 12):
    tempolist = [0] * 71
    list71 = []
    for i in range(1, len(my_data)):
        templistt = []

        for j in range(0, len(vallist[i])):
            #if my_data[:,1][i]>0:

            if my_data[:, ff][i] > 0:
                if (my_data[:, ff][i] *
                        np.true_divide(vallist[i][j], sum(vallist[i]))):
                    templistt.append(
                        my_data[:, ff][i] *
                        np.true_divide(vallist[i][j], sum(vallist[i])))
                else:
                    templistt.append(0)
            else:
                templistt.append(0)
        #print templistt
    #
        list71.append((templistt))
        #print len(list32),'**'
        #print list32
        #time.sleep(1)
    # for i in range(0,len(my_data)): #31
    for j in range(0, 42):
Exemplo n.º 39
0
def run_single(dist_type,
               gap,
               mu_style,
               hyp_style,
               pi1,
               no_arms,
               num_hyp,
               sigma,
               epsilon,
               top_arms,
               alpha0,
               trunctimerange,
               FDR,
               NUMRUN,
               mu_max,
               alg_num=0,
               punif=0,
               cauchyn=0,
               verbose=0,
               precision=1e-8):

    alg_name = alg_list[alg_num]
    numtrunc = len(trunctimerange)

    # Set up the results log file
    time_str = datetime.today().strftime("%m%d%y_%H%M")
    if not os.path.exists('./results'):
        os.makedirs('./results')
    res_filename = './results/output_%s.dat' % time_str
    result_file = open(res_filename, 'w')

    # Load mu_mat with ready to go mu
    (mu_mat, Hypo) = parse_mu.get_mu(dist_type, gap, mu_style, hyp_style, pi1,
                                     no_arms, num_hyp, sigma, epsilon,
                                     top_arms, mu_max)

    #ipdb.set_trace()
    num_alt = sum(Hypo)
    if dist_type == 1:
        bound_type = 'SubGaussian_LIL'
    elif dist_type == 0:
        bound_type = 'Bernoulli_LIL'

    # numtrunc could in fact be dropped; it is only useful for inspecting p-values at different truncation times when debugging
    pval_mat = np.zeros(shape=(numtrunc, num_hyp, NUMRUN))
    rej_mat = np.zeros(shape=(numtrunc, num_hyp, NUMRUN))
    samples_mat = np.zeros(shape=(numtrunc, num_hyp, NUMRUN))
    alpha_mat = np.zeros(shape=(numtrunc, num_hyp, NUMRUN))
    wealth_mat = np.zeros(shape=(numtrunc, num_hyp, NUMRUN))
    FDR_tsr = np.zeros(shape=(numtrunc, num_hyp, NUMRUN))
    falrej_vec = np.zeros([numtrunc, NUMRUN])
    correj_vec = np.zeros([numtrunc, NUMRUN])
    samples_vec = np.zeros([numtrunc, NUMRUN])
    totrej_vec = np.zeros([numtrunc, NUMRUN])
    rightarm_vec = np.zeros([numtrunc, NUMRUN])
    pval_vec = np.zeros(num_hyp)

    for l in range(NUMRUN):

        # Initialize FDR procedures, all first values are to be tossed (non-used, including alpha)
        if FDR == 0:
            proc = GAIPlus.GAI_proc(alpha0)
        elif FDR == 1:
            proc = Lord.LORD_proc(alpha0)
        elif FDR == 2:
            proc = GAI_MW.GAI_MW_proc(alpha0)
        elif FDR == 3:
            proc = wrongFDR.wrongFDR_proc(alpha0)
        elif FDR == 4:
            proc = AlphaInvest.ALPHA_proc(alpha0)
        # dummy wrong FDR, always giving same alpha0 or some other constant
        elif FDR == 5:
            proc = Bonferroni.BONF_proc(alpha0)

        tic = time.time()
        for i in range(num_hyp):

            # Get means of experiment
            mu_list = mu_mat[i]
            this_exp = rowexp_new.rowexp(Hypo[i], no_arms, 1, mu_list)

            if verbose:
                result_file.write("Run: %d\n" % l)
                #result_file.write(mu_list)

            # Draw exp if possibly alg-dependent exp
            this_alpha = proc.alpha[-1]
            if (punif == 1) & (Hypo[i] == 0):
                # Skip experiment
                rightarm_b = 0
                bestarm_idx = -1
            else:
                #if this_alpha == 0:
                #    ipdb.set_trace()
                this_exp.multi_ab(this_alpha,
                                  trunctimerange,
                                  epsilon,
                                  bound_type,
                                  alg_name,
                                  1,
                                  cauchyn,
                                  punif,
                                  verbose=verbose,
                                  precision=precision)
                rightarm_b = this_exp.rightarm
                bestarm_idx = this_exp.bestarm['index']

            # Compute values and all for different truncation times
            for q, trunctime in enumerate(trunctimerange):
                #ipdb.set_trace()
                # Get P values
                if (punif == 1) & (Hypo[i] == 0):
                    pval_mat[q][i][l] = np.random.rand(
                    )  # Uniform if null hypothesis to get some FDR
                    total_samples = 1
                else:
                    #pval_vec[i] = this_exp.pval[q]
                    # Take the min of all p-values you have seen

                    pval_mat[q][i][l] = this_exp.pval[q]
                    #pval_mat[q][i][l] = min(this_exp.pvals)
                    total_samples = this_exp.total_queries[q]

                samples_mat[q][i][l] = total_samples

                # If wealth still positive for that procedure
                if (proc.wealth_vec[-1] >= 0):
                    # Reject
                    rej_mat[q][i][l] = (pval_mat[q][i][l] <=
                                        this_alpha + precision)

                    # Total measures
                    falrej_vec[q][l] = falrej_vec[q][l] + rej_mat[q][i][l] * (
                        1 - Hypo[i])
                    correj_vec[q][
                        l] = correj_vec[q][l] + rej_mat[q][i][l] * Hypo[i]
                    rightarm_vec[q][l] = rightarm_vec[q][l] + (
                        Hypo[i]) * rightarm_b * rej_mat[q][i][l]
                    samples_vec[q][l] = samples_vec[q][l] + total_samples
                    totrej_vec[q][l] = falrej_vec[q][l] + correj_vec[q][l]

                    FDR_tsr[q][i][l] = np.true_divide(falrej_vec[q][l],
                                                      max(totrej_vec[q][l], 1))
                    #ipdb.set_trace()
                    if verbose:
                        result_file.write(
                            "true best: %d, found best: %d, queries: %d \n" %
                            (argmax(mu_list), bestarm_idx, total_samples))
                        result_file.write(
                            "alpha_j: %f, p_j: %f, rej: %d \n" %
                            (this_alpha, pval_mat[q][i][l], rej_mat[q][i][l]))
            if (proc.wealth_vec[-1] >= 0):
                # Get next alpha (and wealth) from FDR if there's a next hypothesis to test
                if i < num_hyp - 1:
                    wealth_mat[q][i][l] = proc.wealth_vec[-1]
                    alpha_mat[q][i][l] = proc.next_alpha(
                        rej_mat[q][i][l])  # use last rejection

        #ipdb.set_trace()
    if verbose:
        result_file.write(
            "Time for one complete experiment with %d hypotheses was %f" %
            (num_hyp, time.time() - tic))

    #ipdb.set_trace()

    # Save data
    dir_name = './dat'

    for q, trunctime in enumerate(trunctimerange):

        #ipdb.set_trace()
        FDR_vec = np.true_divide(
            falrej_vec[q],
            [max(totrej_vec[q][l], 1) for l in range(len(totrej_vec[q]))])
        TDR_vec = np.true_divide(correj_vec[q], num_alt)
        BDR_vec = np.true_divide(rightarm_vec[q], num_alt)
        FDR_mat = FDR_tsr[q]

        pr_filename = 'PR_D%d_MS%d_AG%d_G%.1f_MM%.1f_E%.1f_Si%.1f_TA%d_HS%d_P%.1f_AL%.1f_FDR%d_NH%d_NA%d_TT%d_PU%d_CN%d_NR%d_%s' % (
            dist_type, mu_style, alg_num, gap, mu_max, epsilon, sigma,
            top_arms, hyp_style, pi1, alpha0, FDR, num_hyp, no_arms, trunctime,
            punif, cauchyn, NUMRUN, time_str)
        ad_filename = 'AD_D%d_MS%d_AG%d_G%.1f_MM%.1f_E%.1f_Si%.1f_TA%d_HS%d_P%.1f_AL%.1f_FDR%d_NH%d_NA%d_TT%d_PU%d_CN%d_NR%d_%s' % (
            dist_type, mu_style, alg_num, gap, mu_max, epsilon, sigma,
            top_arms, hyp_style, pi1, alpha0, FDR, num_hyp, no_arms, trunctime,
            punif, cauchyn, NUMRUN, time_str)
        td_filename = 'TD_D%d_MS%d_AG%d_G%.1f_MM%.1f_E%.1f_Si%.1f_TA%d_HS%d_P%.1f_AL%.1f_FDR%d_NH%d_NA%d_TT%d_PU%d_CN%d_NR%d_%s' % (
            dist_type, mu_style, alg_num, gap, mu_max, epsilon, sigma,
            top_arms, hyp_style, pi1, alpha0, FDR, num_hyp, no_arms, trunctime,
            punif, cauchyn, NUMRUN, time_str)

        # Save data
        saveres(dir_name, td_filename, FDR_mat)
        saveres(dir_name, ad_filename, [
            BDR_vec, TDR_vec, FDR_vec, samples_vec[q], falrej_vec[q],
            totrej_vec[q]
        ])
        saveres(
            dir_name, pr_filename, np.r_[rej_mat[q], pval_mat[q], alpha_mat[q],
                                         wealth_mat[q], samples_mat[q]])

    result_file.close()
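
As a sanity check on the FDP estimate computed above (hypothetical numbers): 2 false out of 5 total rejections gives 0.4, and the max(., 1) guard keeps the ratio defined when nothing is rejected:

import numpy as np

print(np.true_divide(2, max(5, 1)))  # 0.4
print(np.true_divide(0, max(0, 1)))  # 0.0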
Exemplo n.º 40
0
statements = full_data['Statement']
speakers = full_data['Speaker']

ratings = ratings.replace(0, 1)
ratings = ratings.replace(2, 1)

ratings = ratings.replace(5, 0)
ratings = ratings.replace(4, 0)
ratings = ratings.replace(3, 0)

encoder = LabelEncoder()
labels = encoder.fit_transform(ratings)
speakers = encoder.fit_transform(speakers)

speakers = np.array(speakers)
speakers = np.true_divide(speakers, speakers.max())  # scale label-encoded ids to [0, 1]

statements = preprocess_data(statements, _regular_expressions=True, _remove_capitals=True,
                                   _remove_punctuation=True, _remove_stopwords=True, _stemming=True)

all_words = []
for statement in statements:
    words = word_tokenize(statement)
    for w in words:
        all_words.append(w)


all_words = nltk.FreqDist(all_words)
most_common = all_words.most_common(WORDS_NUMBER)

######################################################################################################
Exemplo n.º 41
0
    validValues = [66, 68, 130, 132]
elif product == "LS8_OLI_LASRC":
    validValues = [322, 386, 834, 898, 1346, 324, 388, 836, 900, 1348]

cloud_mask = isin(nbar["pixel_qa"].values, validValues)

for band in bands:
    datos = np.where(
        np.logical_and(nbar.data_vars[band] != nodata, cloud_mask),
        nbar.data_vars[band], np.nan)
    allNan = ~np.isnan(datos)
    if normalized:
        m = np.nanmean(datos.reshape((datos.shape[0], -1)), axis=1)
        st = np.nanstd(datos.reshape((datos.shape[0], -1)), axis=1)
        datos = np.true_divide(
            (datos - m[:, np.newaxis, np.newaxis]),
            st[:, np.newaxis, np.newaxis]) * np.nanmean(st) + np.nanmean(m)
    medians[band] = np.nanmedian(datos, 0)
    medians[band][np.sum(allNan, 0) < minValid] = np.nan
del datos
period_green = medians["green"]
period_nir = medians["swir1"]
del medians
mask_nan = np.logical_or(np.isnan(period_green), np.isnan(period_nir))
period_ndwi = np.true_divide(np.subtract(period_green, period_nir),
                             np.add(period_green, period_nir))
period_ndwi[mask_nan] = np.nan
# Clip to avoid extreme values.
period_ndwi[period_ndwi > 1] = 1.0
period_ndwi[period_ndwi < -1] = np.nan
import xarray as xr
Exemplo n.º 42
0
tan = utils.copy_docstring(
    'tf.math.tan',
    lambda x, name=None: np.tan(x))

tanh = utils.copy_docstring(
    'tf.math.tanh',
    lambda x, name=None: np.tanh(x))

top_k = utils.copy_docstring(
    'tf.math.top_k',
    _top_k)

truediv = utils.copy_docstring(
    'tf.math.truediv',
    lambda x, y, name=None: np.true_divide(x, y))

# unsorted_segment_max = utils.copy_docstring(
#     'tf.math.unsorted_segment_max',
#     lambda data, segment_ids, num_segments, name=None: (
#         np.unsorted_segment_max))

# unsorted_segment_mean = utils.copy_docstring(
#     'tf.math.unsorted_segment_mean',
#     lambda data, segment_ids, num_segments, name=None: (
#         np.unsorted_segment_mean))

# unsorted_segment_min = utils.copy_docstring(
#     'tf.math.unsorted_segment_min',
#     lambda data, segment_ids, num_segments, name=None: (
#         np.unsorted_segment_min))
Exemplo n.º 43
0
    # -------------------------------------------------------------------------
    # *** Calculate average within run

    print('------Calculating average within run')

    # In order to reduce memory demands (in case of a large number of runs), we
    # calculate the average time series within runs in a first step, and form
    # the overall average at the end. First, calculate the sum over the fifth
    # dimension (which represents the block number), producing a four-
    # dimensional array:
    aryTmp = np.sum(aryTmpBlcks,
                    axis=0,
                    keepdims=False)

    # Divide by number of blocks:
    aryTmp = np.true_divide(aryTmp, varTmpNumBlck)

    # Store the run average in the results array:
    aryRunsAvrgs[index_02, :, :, :, :] = np.copy(aryTmp).astype(np.float32)

# -----------------------------------------------------------------------------
# *** Calculate average across runs

print('---Calculating average across runs')

# Create the sum over the dimension of the array that represents run number,
# producing a four-dimensional array:
aryAvrg = np.sum(aryRunsAvrgs,
                 axis=0,
                 keepdims=False)
Exemplo n.º 44
0
        y = X
        y_hat = decoder
        saver = tf.train.Saver()
        session = tf.Session()
        saver.restore(session, './mnist-autoencoder-99')
        prediction = session.run([decoder], feed_dict={X: im})
        prediction = np.asarray(prediction).reshape((28, 28)) * 255.
        im = im.reshape((28, 28)) * 255.

        image = Image.fromarray(
            np.concatenate((im, prediction), axis=0).astype('uint8'), 'L')
        image.save('/Users/mesuterhanunal/Desktop/images/%d.png' %
                   random.randint(0, 100000))


if __name__ == '__main__':
    X_train, _, X_test, Y_test = mnist.load()
    X_train = np.true_divide(X_train, 255.0)
    X_test = np.true_divide(X_test, 255.0)

    autoencoder = AutoEncoder(
        X_train,
        hidden_sizes=[64, 64, 2],
        num_layers=3,
        activations=['tanh', 'tanh', 'tanh', 'tanh', 'tanh', 'relu'])
    # autoencoder.train(lr=0.00002, epoch=100, batch_size=64)

    for i in range(10):
        idx = np.random.choice(np.argwhere(Y_test == i).reshape(-1), 1)
        autoencoder.test(X_test[idx].reshape((1, 784)))
Exemplo n.º 45
0
def receiver(data):

    data = list(data)

    chirp = CHIRP

    #Find how much to shift to reach the first chirp //Synchronisation
    shifts = shift_finder(chirp, data, SAMPLE_FREQUENCY, window=0)
    shift = shifts[0] + 1

    # Remove Stuff before and after data and split into frames
    # 1) Remove everything up to the beginning of the first chirp
    # 2) Split into frames (unknown number), remove last frame if it's not a full frame
    data = data[shift - CHIRP_BLOCKS_PER_FRAME * PREFIXED_SYMBOL_LENGTH:]
    data = [
        data[i:i + DATA_PER_FRAME] for i in range(0, len(data), DATA_PER_FRAME)
    ]
    if len(data[-1]) != DATA_PER_FRAME:
        del data[-1]

    # Remove the chirp
    data = [
        frame[CHIRP_BLOCKS_PER_FRAME * PREFIXED_SYMBOL_LENGTH:]
        for frame in data
    ]

    # Channel Estimation
    frame = data[0]
    # Split into symbols
    frame = [
        frame[i:i + PREFIXED_SYMBOL_LENGTH]
        for i in range(0, len(frame), PREFIXED_SYMBOL_LENGTH)
    ]

    # Isolate Estimation Symbols
    estimation_symbols = frame[0:KNOWN_DATA_BLOCKS_PER_FRAME]
    estimation_symbols = [norm(symbol) for symbol in estimation_symbols]

    known_symbol = get_known_data()
    known_symbol = norm(known_symbol)

    channel_response = channel_estimation(estimation_symbols, known_symbol)

    # plt.figure()
    # plt.plot(channel_response.real,color='r')
    # plt.plot(channel_response.imag,color='b')
    # plt.show()

    # plt.figure()
    # plt.scatter(channel_response.real,channel_response.imag)

    # Isolate data symbols
    data = [
        frame[KNOWN_DATA_BLOCKS_PER_FRAME *
              PREFIXED_SYMBOL_LENGTH:-PREFIXED_SYMBOL_LENGTH *
              KNOWN_DATA_BLOCKS_PER_FRAME] for frame in data
    ]

    #Remove chirp and take blocks of 4800
    for i, frame in enumerate(data):
        frame = [
            frame[i:i + PREFIXED_SYMBOL_LENGTH][CP:]
            for i in range(0, len(frame), PREFIXED_SYMBOL_LENGTH)
        ]
        data[i] = frame

    data = [symbol for frame in data for symbol in frame]

    #Power Checking
    symbol_powers = np.array(
        [np.sqrt(np.mean(np.square(symbol))) for symbol in data])
    symbol_powers -= np.min(symbol_powers)
    symbol_powers = norm(symbol_powers)

    for i, power in enumerate(symbol_powers):
        if power < 0.1 * 32767:
            data[i] = None

    data = [symbol for symbol in data if symbol is not None]
    #check_typing(data)
    # plt.figure()
    # plt.plot(symbol_powers)
    # plt.show()

    # FFT the symbols
    data = [np.fft.fft(symbol, N) for symbol in data]

    # plt.figure()
    # plt.scatter(np.array(data).real, np.array(data).imag)
    # plt.show()

    # Divide each symbol by channel response
    data = [
        np.true_divide(symbol, channel_response).tolist() for symbol in data
    ]

    # Discard second half of all symbols and keep only symbols in bins 100-1500
    data = [
        symbol[L_PADDING + 1:1 + L_PADDING + CONSTELLATION_VALUES_PER_BLOCK]
        for symbol in data
    ]

    # Flatten into single list of symbols
    data = [value for symbol in data for value in symbol]

    # plt.figure()
    # plt.title("Symbols before demapping")
    # plt.scatter(np.array(data).real, np.array(data).imag)
    # plt.show()

    # Map each symbol to constellation values
    for i, value in enumerate(data):
        # Get distance to all symbols in constellation
        distances = {
            abs(value - const_value): key
            for key, const_value in CONSTELLATION.items()
        }
        # Get minimum distance
        minimum_distance = min(distances.keys())
        # Find symbol matching minimum distance and append
        data[i] = distances[minimum_distance]

    # Make into one big string
    data = "".join(["".join(symbol) for symbol in data])

    return data
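
The distance-keyed dictionary above works but collapses ties; an equivalent, collision-free way to express the same minimum-distance demapping (the QPSK-style CONSTELLATION shown is hypothetical):

CONSTELLATION = {"00": 1 + 1j, "01": -1 + 1j, "11": -1 - 1j, "10": 1 - 1j}
value = 0.9 + 1.2j
bits = min(CONSTELLATION, key=lambda key: abs(value - CONSTELLATION[key]))
print(bits)  # '00'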
Exemplo n.º 46
0
def take_snapshot(camera_position, camera_lookat,
                  is_layer_view) -> typing.Optional[QImage]:
    """
    Take a snapshot of the current scene.
    :param camera_position: The position of the camera to take the snapshot with.
    :param camera_lookat: The position of the focal point of the camera.
    :param is_layer_view: Whether we're looking at layer view or the model itself.
    :return: A screenshot of the current scene.
    """
    application = cura.CuraApplication.CuraApplication.getInstance()
    plugin_registry = application.getPluginRegistry()

    # Set the camera to the desired position. We'll use the actual camera for the snapshot just because it looks cool while it's busy.
    camera = application.getController().getScene().getActiveCamera()
    camera.setPosition(
        UM.Math.Vector.Vector(camera_position[0], camera_position[2],
                              camera_position[1])
    )  # Note that these are OpenGL coordinates, swapping Y and Z.
    if not camera_lookat:
        bounding_box = UM.Math.AxisAlignedBox.AxisAlignedBox()
        for node in UM.Scene.Iterator.DepthFirstIterator.DepthFirstIterator(
                cura.CuraApplication.CuraApplication.getInstance(
                ).getController().getScene().getRoot()):
            if node.isSelectable():
                bounding_box = bounding_box + node.getBoundingBox()
        camera_lookat = bounding_box.center
    else:
        camera_lookat = UM.Math.Vector.Vector(camera_lookat[0],
                                              camera_lookat[2],
                                              camera_lookat[1])
    camera.lookAt(camera_lookat)
    if abs(camera.getPosition().x - camera_lookat.x) < 0.01 and abs(
            camera.getPosition().z -
            camera_lookat.z) < 0.01:  # Looking straight up or straight down.
        # Make sure the yaw of the camera is consistent regardless of previous position.
        if camera.getPosition().y > camera_lookat.y:
            camera.setOrientation(UM.Math.Quaternion.Quaternion(-2, 0, 0, 2))
        else:
            camera.setOrientation(UM.Math.Quaternion.Quaternion(2, 0, 0, 2))
    time.sleep(
        2
    )  # Some time to update the scene nodes. Don't know if this is necessary but it feels safer.

    # Use a transparent background.
    gl_bindings = UM.View.GL.OpenGL.OpenGL.getInstance().getBindingsObject()
    gl_bindings.glClearColor(0.0, 0.0, 0.0, 0.0)
    gl_bindings.glClear(gl_bindings.GL_COLOR_BUFFER_BIT
                        | gl_bindings.GL_DEPTH_BUFFER_BIT)

    try:  # In Qt6 it's an enum, in Qt5 it's a field of QImage.
        colour_format = QImage.Format.Format_ARGB32  # For Qt6.
    except AttributeError:
        colour_format = QImage.Format_ARGB32  # For Qt5.

    if is_layer_view:
        # Remove any nozzle node. It can get in the way of what we want to see and influence cropping of the image badly.
        simulation_view_plugin = plugin_registry.getPluginObject(
            "SimulationView")
        for node in UM.Scene.Iterator.DepthFirstIterator.DepthFirstIterator(
                application.getController().getScene().getRoot()):
            if hasattr(
                    node, "_createNozzleMesh"
            ):  # This node is a NozzleNode (the actual class is not exposed to us outside the plug-in).
                node.getParent().removeChild(node)

        render_pass = simulation_view_plugin.getSimulationPass()
        render_pass.render()
        time.sleep(1.2)
        screenshot = render_pass.getOutput()
        print("---- screenshot size:", screenshot.width(), "x",
              screenshot.height())
        if screenshot.width() != render_width or screenshot.height(
        ) != render_height:
            print(
                "---- render output not correct size! Resizing window to compensate."
            )
            main_window = application.getMainWindow()
            delta_width = render_width - screenshot.width()
            delta_height = render_height - screenshot.height()
            main_window.setWidth(main_window.width() + delta_width)
            main_window.setHeight(main_window.height() + delta_height)
            return None  # Failed to render. Try again after waiting outside of Qt thread.

        # Remove alpha channel from this picture. We don't want the semi-transparent support since we don't draw the object outline here.
        # Sadly, QImage.convertToFormat has only 2 formats with boolean alpha and they both premultiply. So we'll go the hard way: Through Numpy.
        pixel_bits = screenshot.bits().asarray(screenshot.sizeInBytes())
        pixels = numpy.frombuffer(pixel_bits, dtype=numpy.uint8).reshape(
            [screenshot.height(), screenshot.width(), 4])
        opaque = numpy.nonzero(pixels[:, :, 0])
        pixels[opaque[0], opaque[1], 3] = 255
        return QImage(
            pixels.data, pixels.shape[1], pixels.shape[0], colour_format
        ).copy(
        )  # Make a copy because the pixel data will go out of scope for Numpy, so that would be invalid memory.
    else:  # Render the objects themselves! Going to be quite complex here since the render is highly specialised in what it shows and what it doesn't.
        view = plugin_registry.getPluginObject("SolidView")
        view._checkSetup()
        renderer = view.getRenderer()

        support_angle = application.getGlobalContainerStack().getProperty(
            "support_angle", "value")
        view._enabled_shader.setUniformValue(
            "u_overhangAngle", math.cos(
                math.radians(90 - support_angle)))  # Correct overhang angle.
        view._enabled_shader.setUniformValue(
            "u_lowestPrintableHeight",
            -1.0)  # Don't show initial layer height.
        object_batch = renderer.createRenderBatch(shader=view._enabled_shader)
        renderer.addRenderBatch(object_batch)
        for node in UM.Scene.Iterator.DepthFirstIterator.DepthFirstIterator(
                application.getController().getScene().getRoot()):
            if not node.getMeshData() or not node.isSelectable():
                continue
            uniforms = {}

            # Get the object's colour.
            extruder_index = int(
                node.callDecoration("getActiveExtruderPosition"))
            material_color = application.getExtrudersModel().getItem(
                extruder_index)["color"]
            uniforms["diffuse_color"] = [
                int(material_color[1:3], 16) / 255,
                int(material_color[3:5], 16) / 255,
                int(material_color[5:7], 16) / 255, 1.0
            ]

            # Render with special shaders for special types of meshes, or otherwise in the normal batch.
            if node.callDecoration("isNonPrintingMesh") and (
                    node.callDecoration("isInfillMesh")
                    or node.callDecoration("isCuttingMesh")):
                renderer.queueNode(node,
                                   shader=view._non_printing_shader,
                                   uniforms=uniforms,
                                   transparent=True)
            elif node.callDecoration("isSupportMesh"):
                uniforms["diffuse_color_2"] = [
                    uniforms["diffuse_color"][0] * 0.6,
                    uniforms["diffuse_color"][1] * 0.6,
                    uniforms["diffuse_color"][2] * 0.6, 1.0
                ]
                renderer.queueNode(node,
                                   shader=view._support_mesh_shader,
                                   uniforms=uniforms)
            else:
                object_batch.addItem(
                    node.getWorldTransformation(copy=False),
                    node.getMeshData(),
                    uniforms=uniforms,
                    normal_transformation=node.getCachedNormalMatrix())

        default_pass = renderer.getRenderPass("default")
        default_pass.render()
        time.sleep(1.2)
        normal_shading = default_pass.getOutput()
        xray_pass = renderer.getRenderPass("xray")
        renderer.addRenderPass(xray_pass)
        xray_pass.render()
        time.sleep(1.2)
        xray_shading = xray_pass.getOutput()

        # Manually composite these shadings. Because the composite shader also adds a background colour.
        normal_data = normal_shading.bits().asarray(
            normal_shading.sizeInBytes())
        composite_pixels = numpy.frombuffer(normal_data,
                                            dtype=numpy.uint8).reshape([
                                                normal_shading.height(),
                                                normal_shading.width(), 4
                                            ])  # Start from the normal image.
        colours = numpy.true_divide(composite_pixels[:, :, 0:3],
                                    255)  # Scaled to [0, 1].
        alpha = numpy.true_divide(composite_pixels[:, :, 3], 255)
        xray_data = xray_shading.bits().asarray(xray_shading.sizeInBytes())
        xray_pixels = numpy.frombuffer(xray_data, dtype=numpy.uint8).reshape(
            [xray_shading.height(),
             xray_shading.width(), 4])
        xray_pixels = numpy.mod(
            xray_pixels[:, :, 0:3], 10
        ) // 5  # The X-ray shader creates increments of 5 for some reason. If there are an odd number of increments (not divisible by 10) then it must be highlighted.
        hue_shift = ((alpha - 0.333) * 6.2831853)
        cos_shift = numpy.repeat(numpy.expand_dims(numpy.cos(-hue_shift),
                                                   axis=2),
                                 3,
                                 axis=2)
        sin_shift = numpy.repeat(numpy.expand_dims(numpy.sin(-hue_shift),
                                                   axis=2),
                                 3,
                                 axis=2)
        k = numpy.array(
            [0.57735, 0.57735, 0.57735]
        )  # 1/sqrt(3), resulting in a diagonal unit vector around which we rotate the channels.
        cross_colour = numpy.cross(colours, k) * -1
        dot_colour = numpy.repeat(numpy.expand_dims(numpy.dot(colours, k),
                                                    axis=2),
                                  3,
                                  axis=2)
        rotated_hue = colours * cos_shift + cross_colour * sin_shift + (
            cos_shift * -1 +
            1.0) * dot_colour * k  # Rodrigues' rotation formula!
        rotated_hue = rotated_hue * 255

        composite_pixels[:, :, 0:
                         3] -= composite_pixels[:, :, 0:
                                                3] * xray_pixels  # Don't use the normal colour for x-rayed pixels.
        composite_pixels[:, :, 0:3] += (rotated_hue * xray_pixels).astype(
            "uint8")  # Use the rotated colour instead.
        composite_pixels[:, :, 3][alpha > 0.1] = 255
        return QImage(
            composite_pixels.data, composite_pixels.shape[1],
            composite_pixels.shape[0], colour_format
        ).copy(
        )  # Make a copy because the pixel data will go out of scope for Numpy, so that would be invalid memory.
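
The hue shift above applies Rodrigues' rotation formula channel-wise, rotating each RGB vector about the grey diagonal so hue changes while luminance along the axis is preserved. A minimal sketch verifying the geometry (rotate_hue is a hypothetical helper name):

import numpy as np

def rotate_hue(rgb, theta):
    k = np.array([1.0, 1.0, 1.0]) / np.sqrt(3.0)  # unit grey axis
    c, s = np.cos(theta), np.sin(theta)
    return rgb * c + np.cross(k, rgb) * s + k * np.dot(k, rgb) * (1.0 - c)

print(rotate_hue(np.array([1.0, 0.0, 0.0]), 2.0 * np.pi / 3.0))
# ~[0, 1, 0]: a 120-degree rotation about grey carries red onto green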
Exemplo n.º 47
0
 def time_quantity_np_truediv(self):
     np.true_divide(self.data, self.data2)
Exemplo n.º 48
0
def channel_estimation(symbols, known_block):

    first_symbol = np.fft.fft(symbols[0], N)
    last_symbol = np.fft.fft(symbols[-1], N)
    # known_block = known_block[CP:]

    # symbols = [symbol[CP:] for symbol in symbols]

    # Take average value of H determined for each block
    symbols = np.average(symbols, axis=0)

    symbols_freq = np.fft.fft(symbols, N)

    # This should not print anything, and yet it does
    print([block for block in known_block if np.abs(block) < 0.00001])

    known_block_freq = np.fft.fft(known_block, N)

    channel_response_freq = np.true_divide(
        symbols_freq,
        known_block_freq,
        out=np.zeros_like(symbols_freq),
        where=np.abs(known_block_freq) > 0.01,
    )

    # Remove DC value
    channel_response_freq[0] = 0
    channel_response_freq[int(N / 2)] = 0

    #Might be needed later to avoid decoding issues
    # channel_response = np.fft.ifft(channel_response_freq, N)[:10]
    # plt.figure()
    # plt.title("Channel Response in DFT domain")
    # plt.plot(channel_response)
    # plt.show()

    #####Linear phase stuff
    first_symbol_resp = np.true_divide(first_symbol,
                                       known_block_freq,
                                       out=np.zeros_like(first_symbol),
                                       where=known_block_freq != 0)
    last_symbol_resp = np.true_divide(last_symbol,
                                      known_block_freq,
                                      out=np.zeros_like(last_symbol),
                                      where=known_block_freq != 0)

    phase_shift_start = np.angle(first_symbol_resp, deg=True)
    phase_shift_end = np.angle(last_symbol_resp, deg=True)

    phase_shift_start = np.unwrap(phase_shift_start)
    phase_shift_end = np.unwrap(phase_shift_end)

    phase_shift = np.subtract(phase_shift_end, phase_shift_start)

    x = np.linspace(0, N, N)

    # plt.figure()
    # plt.plot(first_symbol_resp, color='r')
    # plt.plot(last_symbol_resp)
    # plt.show()

    # plt.figure()
    # plt.plot(phase_shift,label="sub")
    lin_phase_shift = np.polyfit(x, phase_shift, deg=1)
    # print(lin_phase_shift)

    lin_phase_shift = [i * lin_phase_shift[0] + lin_phase_shift[1] for i in x]

    # plt.plot(lin_phase_shift, label='lin')
    # plt.legend()
    # plt.show()

    #channel_response_freq = np.fft.fft(channel_response,N)

    return channel_response_freq
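
The out=/where= combination used three times above is numpy's standard guarded division: entries failing the condition are never divided and keep the value supplied in out, so no warnings or inf/nan leak through. A minimal demonstration:

import numpy as np

num = np.array([1.0, 2.0, 3.0])
den = np.array([2.0, 0.0, 4.0])
print(np.true_divide(num, den, out=np.zeros_like(num), where=den != 0))
# [0.5  0.   0.75]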
Exemplo n.º 49
0
 def truncated_division(a, b):
     ints = [np.int8, np.int16, np.int32, np.int64]
     if b.dtype in ints:
         return np.trunc(np.true_divide(a, b)).astype(b.dtype)
     else:
         return np.true_divide(a, b).astype(b.dtype)
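
For negative operands this gives C-style round-toward-zero results, unlike numpy's floor division:

import numpy as np

a, b = np.array([7, -7]), np.array([2, 2])
print(np.trunc(np.true_divide(a, b)).astype(b.dtype))  # [ 3 -3] toward zero
print(a // b)                                          # [ 3 -4] toward -inf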
Exemplo n.º 50
0
def load_data_train(root, img_size=64):
    '''
    loads the data from files and returns a torch dataset
    '''
    data_folders = [os.path.join(root, 'Imagenet64_train_part1'),
                    os.path.join(root, 'Imagenet64_train_part2')]
    x_data = []

    img_size2 = img_size * img_size
    tot_mean = None

    for data_folder in data_folders:
        for filename in os.listdir(data_folder):
            # read the data as numpy arrays
            with open(os.path.join(data_folder, filename), 'rb') as f:
                print("processing file: ", filename)
                print("unpickling")
                datadict = pickle.load(f, encoding='latin1')
                x = np.array(datadict['data'])
                # y = np.array(datadict['labels'])
                mean_image = datadict['mean']
                print("finished unpickling")

                print("preprocessing data")
                # performing normalization i.e. x /= np.float32(255)
                np.true_divide(x, np.float32(255), out=x, casting='unsafe')
                np.true_divide(mean_image, np.float32(255), out=mean_image, casting='unsafe')

                # Labels are indexed from 1, shift it so that indexes start at 0
                # y = [i-1 for i in y]

                np.subtract(x, mean_image, out=x, casting='unsafe')
                if tot_mean is None:
                    tot_mean = mean_image
                else:
                    tot_mean += mean_image

                x = np.dstack((x[:, :img_size2], x[:, img_size2:2*img_size2], x[:, 2*img_size2:]))
                x = x.reshape((x.shape[0], img_size, img_size, 3)).transpose(0, 3, 1, 2)

                x_data.append(x)
                # y_data.append(y)
                print("completed preprocessing\n")

    # X_train, Y_train = np.concatenate(x_data), np.concatenate(y_data)
    X_train = np.concatenate(x_data)
    tot_mean /= 10

    # print("done processing everything!! Creating mirrored versions now")
    # create mirrored images
    # X_train = x[0:data_size, :, :, :]
    # Y_train = y[0:data_size]
    # X_train_flip = X_train[:, :, :, ::-1]
    # Y_train_flip = Y_train
    # X_train = np.concatenate((X_train, X_train_flip), axis=0)
    # Y_train = np.concatenate((Y_train, Y_train_flip), axis=0)

    print("returning data now")
    return (ImageNet64Data(X_train, None),
            # CIFAR10Data(X_val, y_val),
            # CIFAR10Data(X_test, y_test),
            mean_image)
Exemplo n.º 51
0
            m_new = m
            st_new = st
            for axis in l:
                # If axis is 0  it is equivalent to x[np.newaxis,:]
                # If axis is 1  it is equivalent to x[:,np.newaxis]
                # And so on
                m_new = np.expand_dims(m_new, axis=axis)
                st_new = np.expand_dims(st_new, axis=axis)

            print('Time axis', time_axis)
            print('New axis', l)
            print('m', m.shape)
            print('st', st.shape)
            print('st_new', st_new.shape)
            print('m_new', m_new.shape)
            datos = np.true_divide(
                (datos - m_new), st_new) * np.nanmean(st) + np.nanmean(m)

        medians[band] = np.nanmedian(datos, time_axis)
        medians[band][np.sum(allNan, time_axis) < minValid] = -9999

medians["ndvi"] = np.true_divide(medians["nir"] - medians["red"],
                                 medians["nir"] + medians["red"])
medians["nbr"] = np.true_divide(medians["nir"] - medians["swir1"],
                                medians["nir"] + medians["swir1"])
medians["nbr2"] = np.true_divide(medians["swir1"] - medians["swir2"],
                                 medians["swir1"] + medians["swir2"])
medians["ndmi"] = np.true_divide(medians["nir"] - medians["swir1"],
                                 medians["nir"] + medians["swir1"])
#medians["gndvi"]=np.true_divide(medians["nir"]-medians["green"],medians["nir"]+medians["green"])
medians["rvi"] = np.true_divide(medians["nir"], medians["red"])
Exemplo n.º 52
0
 def truncated_mod(a, b):
     return a - b * np.trunc(np.true_divide(a, b)).astype(b.dtype)
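
Because the quotient is truncated toward zero, the remainder keeps the dividend's sign, matching np.fmod rather than np.mod:

import numpy as np

a, b = np.array([5, -5]), np.array([3, 3])
print(a - b * np.trunc(np.true_divide(a, b)).astype(b.dtype))  # [ 2 -2]
print(np.fmod(a, b))                                           # [ 2 -2]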
Exemplo n.º 53
0
def prep_data(data, covariates, data_start, train=True):
    #print("train: ", train)
    time_len = data.shape[0]
    #print("time_len: ", time_len)
    input_size = window_size - stride_size
    windows_per_series = np.full((num_series),
                                 (time_len - input_size) // stride_size)
    #print("windows pre: ", windows_per_series.shape)
    if train:
        windows_per_series -= (data_start + stride_size - 1) // stride_size
    #print("data_start: ", data_start.shape)
    #print(data_start)
    #print("windows: ", windows_per_series.shape)
    #print(windows_per_series)
    total_windows = np.sum(windows_per_series)
    x_input = np.zeros((total_windows, window_size, 1 + num_covariates + 1),
                       dtype='float32')
    label = np.zeros((total_windows, window_size), dtype='float32')
    v_input = np.zeros((total_windows, 2), dtype='float32')
    #cov = 3: ground truth + age + day_of_week + hour_of_day + num_series
    #cov = 4: ground truth + age + day_of_week + hour_of_day + month_of_year + num_series
    count = 0
    if not train:
        covariates = covariates[-time_len:]
    for series in trange(num_series):
        cov_age = stats.zscore(np.arange(total_time - data_start[series]))
        if train:
            covariates[data_start[series]:time_len,
                       0] = cov_age[:time_len - data_start[series]]
        else:
            covariates[:, 0] = cov_age[-time_len:]
        for i in range(windows_per_series[series]):
            if train:
                window_start = stride_size * i + data_start[series]
            else:
                window_start = stride_size * i
            window_end = window_start + window_size
            '''
            print("x: ", x_input[count, 1:, 0].shape)
            print("window start: ", window_start)
            print("window end: ", window_end)
            print("data: ", data.shape)
            print("d: ", data[window_start:window_end-1, series].shape)
            '''
            x_input[count, 1:, 0] = data[window_start:window_end - 1, series]
            x_input[count, :, 1:1 +
                    num_covariates] = covariates[window_start:window_end, :]
            x_input[count, :, -1] = series
            label[count, :] = data[window_start:window_end, series]
            nonzero_sum = (x_input[count, 1:input_size, 0] != 0).sum()
            if nonzero_sum == 0:
                v_input[count, 0] = 0
            else:
                v_input[count, 0] = np.true_divide(
                    x_input[count, 1:input_size, 0].sum(), nonzero_sum) + 1
                x_input[count, :, 0] = x_input[count, :, 0] / v_input[count, 0]
                if train:
                    label[count, :] = label[count, :] / v_input[count, 0]
            count += 1
    prefix = os.path.join(save_path, 'train_' if train else 'test_')
    np.save(prefix + 'data_' + save_name, x_input)
    np.save(prefix + 'v_' + save_name, v_input)
    np.save(prefix + 'label_' + save_name, label)
Exemplo n.º 54
0
def normalizeQueryState(data):
    if data is None:
        return
    data[0] = data[0].astype(float)
    data[1] = data[1].astype(float)
    data[2] = data[2].astype(float)
    data[0][0] = np.true_divide(data[0][0],20000)
    data[0][1] = np.true_divide(data[0][1],5000)
    data[0][2] = np.true_divide(data[0][2],5000)
    data[0][3] = np.true_divide(data[0][3],200)
    data[0][4] = np.true_divide(data[0][4],200)
    data[0][5] = np.true_divide(data[0][5],200)
    data[0][6] = np.true_divide(data[0][6],200)

    data[1][0] = np.true_divide(data[1][0],4)
    data[1][1] = np.true_divide(data[1][1],2)
    data[2][0] = np.true_divide(data[2][0],3)
    data[2][1] = np.true_divide(data[2][1],2)
    data[2][3] = np.true_divide(data[2][3],3)
    data[2][4] = np.true_divide(data[2][4],2000)
    data[2][5] = np.true_divide(data[2][5],255)
    return data
Exemplo n.º 55
0
 def test_true_divide_2(self):
     from numpy import arange, array, true_divide
     assert (true_divide(arange(3), array([2, 2, 2])) == array([0, 0.5, 1])).all()
Exemplo n.º 56
0
def load_data(project_name, pickle_name):
    '''
    
    Description: data loader for two different projects:
        -'mnist'
        -'celebA'
    This function is tailored specifically for the data organization of these
    projects and needs to be adapted in order to work with other projects.

    Inputs:
        -'project_name' (str) = the name of the project from which we will load data.
        -'pickle_name' (str) = the name of the pickle file containing the data.

    Return:
        -'data_dict' (data_dict) = for the 'mnist' project, a data dictionary with form
            data_dict[key_1][key_2]
        with key_1 in ['train', 'testa', 'testb'] and key_2 in ['inputs', 'target'].
        OR
        -'images_array' (np.ndarray) = a stacked 4-D array of shape 'num_images' x 64 x 64 x 3.
        -'features_images_dict' (dict) = a dictionary with structure
            {'face features': [list of indices of images having that face feature]}.

    '''

    # Data loading for the 'mnist' project. Expected file structure is
    # root
    #   |--mnist
    #       |--const.DIR_DATA
    #       |--'pickle_name'-tf-train.pkl
    #       |--'pickle_name'-tf-testa.pkl
    #       |--'pickle_name'-tf-testb.pkl
    if project_name == 'mnist':
        data_dict = {}
        for key in const.LIST_TRAIN_TESTA_TESTB_KEYS:
            data_dict[key] = {}
            path = os.path.join(const.PATH_ROOT, project_name, const.DIR_DATA,
                                pickle_name + '-' + key + '.pkl')
            with open(path, 'rb') as file_handle:
                data_tuple = pickle.load(file_handle)
                data_dict[key]['inputs'] = np.stack(data_tuple[0] / 255)
                data_dict[key]['inputs'] = np.expand_dims(np.copy(
                    data_dict[key]['inputs']),
                                                          axis=3)
                data_dict[key]['target'] = np.stack(data_tuple[1])

        return data_dict

    # Data loading for the 'celebA' project. Expected file structure is
    # root
    #   |--celebA
    #       |--const.DIR_DATA
    #               |--'pickle_name'.pkl
    #               |--'pickle_name'_details.pkl (optional, not used for training)
    elif project_name == 'celebA':

        # Get face image data:
        path = os.path.join(const.PATH_ROOT, project_name, const.DIR_DATA,
                            pickle_name + '.pkl')
        with open(path, 'rb') as file_handle:
            images_array = pickle.load(file_handle)
            images_array = np.true_divide(images_array, 255.0)

        # Looks for face attribute labels too:
        path = os.path.join(const.PATH_ROOT, project_name, const.DIR_DATA,
                            pickle_name + '_details.pkl')
        try:
            with open(path, 'rb') as file_handle:
                features_images_dict = pickle.load(file_handle)
        except Exception:
            features_images_dict = {}

        return images_array, features_images_dict
Exemplo n.º 57
0
import numpy as np
a = np.array([5, 5, -5, -5])
b = np.array([2, -2, 2, -2])
print(a, b)
c = np.true_divide(a, b)
d = np.divide(a, b)
e = a / b
print(c, d, e)
f = np.floor_divide(a, b)
g = a // b
print(f, g)
h = np.ceil(a / b).astype(int)
print(h)
i = np.trunc(a / b).astype(int)
j = (a / b).astype(int)
print(i, j)
k = np.remainder(a, b)
l = np.mod(a, b)
m = a % b
print(k, l, m)
n = np.fmod(a, b)
print(n)
Exemplo n.º 58
0
                                            satype = 'C[%d:]=A[%d:]%sscalar' % (
                                                zoff, xoff, op)
                                            y = y[0]
                                        elif scalararraytype == 2:
                                            satype = 'C[%d:]=scalar%sA[%d:]' % (
                                                zoff, op, yoff)
                                            #y = x  y=y[0]
                                            x = x[0]

                                        CPA_min = runBench(
                                            np_func,
                                            z,
                                            x,
                                            y,
                                            internalCount=internalCount)
                                        CPE_min = np.true_divide(CPA_min, n)
                                        CPEs[zi][xi][yi] = CPE_min
                                        if args.verbose:
                                            print(args.prefix,
                                                  '% 7s' % impl,
                                                  satype,
                                                  np_type.__name__,
                                                  '% 6d' % internalCount,
                                                  '% 7d' % n,
                                                  '% 4.2f' % CPE_min,
                                                  sep=', ',
                                                  flush=True)
                            if scalararraytype == 0:
                                satype = ' array%sarray' % op
                            elif scalararraytype == 1:
                                satype = 'array%sscalar' % op
Exemplo n.º 59
0
def get_crawled_term_analysis_page(shared_state):
    st.header("Crawled term analysis")

    crawled_terms_df = shared_state.crawled_terms_df
    df_counts_by_hour = shared_state.df_counts_by_hour
    df_most_common_hashtags = shared_state.df_most_common_hashtags
    df_most_common_tokens = shared_state.df_most_common_tokens
    df_cooccurrence = shared_state.df_cooccurrence

    st.subheader("List of Crawled Terms (since 3rd of November)")

    st.dataframe(crawled_terms_df)

    st.subheader("Co-occurrence matrix (for terms with count > 5000)")

    st.dataframe(df_cooccurrence)
    co_occurrence_diagonal = np.diagonal(df_cooccurrence)

    with np.errstate(divide="ignore", invalid="ignore"):
        co_occurrence_fraction = np.nan_to_num(
            np.true_divide(df_cooccurrence, co_occurrence_diagonal[:, None])
        )

    fig = plt.figure()
    st.subheader("Co-occurence heatmap (inverted log-scaling)")
    st.markdown("Closer to 0 means higher correlation")
    with st.echo():
        co_occurrence_heatmap_df = pd.DataFrame(
            np.log(co_occurrence_fraction),
            index=df_cooccurrence.index,
            columns=df_cooccurrence.columns,
        )
    np.fill_diagonal(co_occurrence_heatmap_df.values, np.nan)
    sns.heatmap(
        co_occurrence_heatmap_df,
        cbar=False,
        square=True,
        annot=True,
        vmin=-5,
        vmax=0,
        center=-2,
        cmap="PuBu",
        linecolor="black",
    )

    plt.tick_params(
        axis="both",
        which="major",
        labelsize=10,
        labelbottom=False,
        bottom=False,
        top=False,
        labeltop=True,
    )
    plt.xticks(rotation=45)
    st.pyplot(fig)

    selected_crawled_term = st.selectbox("Select term", crawled_terms_df["term"].values)

    st.plotly_chart(
        plotly_hourly_coverage(
            df_counts_by_hour, selected_crawled_term, selected_crawled_term
        )
    )

    col1, col2 = st.columns(2)
    col1.subheader("Top hashtags for '{}'".format(selected_crawled_term))
    col1.dataframe(get_most_common(df_most_common_hashtags, selected_crawled_term, 15))

    col2.subheader("Top tokens for '{}'".format(selected_crawled_term))
    col2.dataframe(get_most_common(df_most_common_tokens, selected_crawled_term, 15))

    st.subheader("--Temporarily Disabled--")
    st.subheader("10 randomly sampled tweets from '{}'".format(selected_crawled_term))

    # term_stats = (
    #     recent_tweet_df[recent_tweet_df[selected_crawled_term] == 1][
    #         ["retweet_count", "quote_count"]
    #     ]
    #     .fillna(0)
    #     .astype(int)
    # )
    # top_retweeted = term_stats.nlargest(10, "retweet_count").sort_values(
    #     "retweet_count", ascending=False
    # )
    # top_quoted = term_stats.nlargest(10, "quote_count").sort_values(
    #     "quote_count", ascending=False
    # )
    # st.table(
    #     pd.DataFrame(
    #         map(
    #             lambda t: [t["text"], t["quote_count"], t["retweet_count"]],
    #             lookup_parsed_tweet_data(
    #                 filtered_by_crawled_term.sample(n=10).index.values
    #             ),
    #         ),
    #         columns=["text", "quote_count", "retweet_count"],
    #     )
    # )

    st.subheader("10 most retweeted tweets for '{}'".format(selected_crawled_term))
    # st.table(
    #     pd.DataFrame(
    #         map(
    #             lambda t: [t["text"], t["quote_count"], t["retweet_count"]],
    #             lookup_parsed_tweet_data(top_retweeted.index.values),
    #         ),
    #         columns=["text", "quote_count", "retweet_count"],
    #     )
    # )

    st.subheader("10 most quoted tweets for '{}'".format(selected_crawled_term))
Exemplo n.º 60
0
def process(
    input_CD_image,
    input_OF_image,
    params,
    model_params,
    num_of_heatmaps=27,  # 26 + background --> 27
    num_of_OFFs=52,  # 26 pairs: 52 layers in total 
    num_of_OFFs_normal=27):  # number of pairs (26) + 1 --> 27

    print(input_CD_image)
    print(input_OF_image)

    oriImgCD = cv2.imread(input_CD_image)  # B,G,R order
    oriImgOF = cv2.imread(input_OF_image)  # B,G,R order

    rawDepth = read_pgm(input_CD_image.replace("mc_blob.png", "depth.pgm"),
                        byteorder='>')

    heatmap_avg = np.zeros(
        (oriImgCD.shape[0], oriImgCD.shape[1], num_of_heatmaps))
    off_avg = np.zeros((oriImgOF.shape[0], oriImgOF.shape[1], num_of_OFFs))

    for m in range(len(multiplier)):
        scale = multiplier[m]

        image_CD_ToTest = cv2.resize(oriImgCD, (0, 0),
                                     fx=scale,
                                     fy=scale,
                                     interpolation=cv2.INTER_CUBIC)
        image_CD_ToTest_padded, pad = util.padRightDownCorner(
            image_CD_ToTest, model_params['stride'], model_params['padValue'])

        input_img_CD = np.transpose(
            np.float32(image_CD_ToTest_padded[:, :, :, np.newaxis]),
            (3, 0, 1, 2))  # required shape (1, width, height, channels)

        image_OF_ToTest = cv2.resize(oriImgOF, (0, 0),
                                     fx=scale,
                                     fy=scale,
                                     interpolation=cv2.INTER_CUBIC)
        image_OF_ToTest_padded, pad = util.padRightDownCorner(
            image_OF_ToTest, model_params['stride'], model_params['padValue'])

        input_img_OF = np.transpose(
            np.float32(image_OF_ToTest_padded[:, :, :, np.newaxis]),
            (3, 0, 1, 2))  # required shape (1, width, height, channels)

        output_blobs = model.predict([input_img_OF, input_img_CD])

        # extract outputs, resize, and remove padding
        # The CD input is used for having the required parameters since they are the same for both inputs

        heatmap = np.squeeze(output_blobs[1])  # output 1 is heatmaps
        heatmap = cv2.resize(heatmap, (0, 0),
                             fx=model_params['stride'],
                             fy=model_params['stride'],
                             interpolation=cv2.INTER_CUBIC)
        heatmap = heatmap[:image_CD_ToTest_padded.shape[0] -
                          pad[2], :image_CD_ToTest_padded.shape[1] - pad[3], :]
        heatmap = cv2.resize(heatmap, (oriImgCD.shape[1], oriImgCD.shape[0]),
                             interpolation=cv2.INTER_CUBIC)

        off = np.squeeze(output_blobs[0])  # output 0 is OFFs
        off = cv2.resize(off, (0, 0),
                         fx=model_params['stride'],
                         fy=model_params['stride'],
                         interpolation=cv2.INTER_CUBIC)
        off = off[:image_CD_ToTest_padded.shape[0] -
                  pad[2], :image_CD_ToTest_padded.shape[1] - pad[3], :]
        off = cv2.resize(off, (oriImgCD.shape[1], oriImgCD.shape[0]),
                         interpolation=cv2.INTER_CUBIC)

        heatmap_avg = heatmap_avg + heatmap / len(multiplier)
        off_avg = off_avg + off / len(multiplier)

    all_peaks = []
    peak_counter = 0

    for part in range(num_of_heatmaps):
        map_ori = heatmap_avg[:, :, part]
        smoothed = gaussian_filter(map_ori, sigma=3)  # avoid shadowing builtin map

        map_left = np.zeros(smoothed.shape)
        map_left[1:, :] = smoothed[:-1, :]
        map_right = np.zeros(smoothed.shape)
        map_right[:-1, :] = smoothed[1:, :]
        map_up = np.zeros(smoothed.shape)
        map_up[:, 1:] = smoothed[:, :-1]
        map_down = np.zeros(smoothed.shape)
        map_down[:, :-1] = smoothed[:, 1:]

        peaks_binary = np.logical_and.reduce(
            (smoothed >= map_left, smoothed >= map_right,
             smoothed >= map_up, smoothed >= map_down,
             smoothed > params['thre1']))
        peaks = list(
            zip(np.nonzero(peaks_binary)[1],
                np.nonzero(peaks_binary)[0]))  # note reverse
        peaks_with_score = [x + (map_ori[x[1], x[0]], ) for x in peaks]
        ids = range(peak_counter, peak_counter + len(peaks))  # avoid shadowing builtin id
        peaks_with_score_and_id = [
            peaks_with_score[i] + (ids[i], ) for i in range(len(ids))
        ]

        all_peaks.append(peaks_with_score_and_id)
        peak_counter += len(peaks)

    connection_all = []
    special_k = []
    mid_num = 4

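    # Limb scoring, OFF/PAF style: for each limb, every candidate pair (A, B)
    # is scored by sampling the OFF field at mid_num points along the A->B
    # segment and projecting the samples onto the unit vector from A to B.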
    for k in range(len(mapIdx)):
        score_mid = off_avg[:, :, [x - num_of_OFFs_normal for x in mapIdx[k]]]
        candA = all_peaks[limbSeq[k][0] - 1]
        candB = all_peaks[limbSeq[k][1] - 1]
        nA = len(candA)
        nB = len(candB)
        indexA, indexB = limbSeq[k]
        if (nA != 0 and nB != 0):
            connection_candidate = []
            for i in range(nA):
                for j in range(nB):
                    vec = np.subtract(candB[j][:2], candA[i][:2])
                    norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
                    # failure case: the two body parts overlap exactly
                    if norm == 0:
                        continue
                    vec = np.divide(vec, norm)

                    startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
                                   np.linspace(candA[i][1], candB[j][1], num=mid_num)))

                    vec_x = np.array(
                        [score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
                         for I in range(len(startend))])
                    vec_y = np.array(
                        [score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
                         for I in range(len(startend))])

                    score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(
                        vec_y, vec[1])
                    score_with_dist_prior = sum(
                        score_midpts) / len(score_midpts) + min(
                            0.5 * oriImgCD.shape[0] / norm - 1, 0)
                    criterion1 = len(
                        np.nonzero(score_midpts > params['thre2'])
                        [0]) > 0.8 * len(score_midpts)
                    criterion2 = score_with_dist_prior > 0
                    if criterion1 and criterion2:
                        connection_candidate.append([
                            i, j, 3 * score_with_dist_prior,
                            3 * score_with_dist_prior + candA[i][2] +
                            candB[j][2]
                        ])

            connection_candidate = sorted(connection_candidate,
                                          key=lambda x: x[2],
                                          reverse=True)
            connection = np.zeros((0, 5))
            for c in range(len(connection_candidate)):
                i, j, s = connection_candidate[c][0:3]
                if (i not in connection[:, 3] and j not in connection[:, 4]):
                    connection = np.vstack(
                        [connection, [candA[i][3], candB[j][3], s, i, j]])
                    if (len(connection) >= min(nA, nB)):
                        break

            connection_all.append(connection)
        else:
            special_k.append(k)
            connection_all.append([])

    # Greedy assembly: each row of `subset` is one person/configuration.
    # The last entry of a row is the number of parts assigned to it; the
    # second-to-last entry is the row's accumulated score.
    subset = -1 * np.ones((0, num_of_OFFs_normal + 1))
    candidate = np.array([item for sublist in all_peaks for item in sublist])

    for k in range(len(mapIdx)):
        if k not in special_k:
            partAs = connection_all[k][:, 0]
            partBs = connection_all[k][:, 1]
            indexA, indexB = np.array(limbSeq[k]) - 1

            for i in range(len(connection_all[k])):
                found = 0
                subset_idx = [-1, -1]
                for j in range(len(subset)):
                    if subset[j][indexA] == partAs[i] or subset[j][
                            indexB] == partBs[i]:
                        subset_idx[found] = j
                        found += 1

                if found == 1:
                    j = subset_idx[0]
                    if (subset[j][indexB] != partBs[i]):
                        subset[j][indexB] = partBs[i]
                        subset[j][-1] += 1
                        subset[j][-2] += candidate[partBs[i].astype(int),
                                                   2] + connection_all[k][i][2]
                elif found == 2:  # if found 2 and disjoint, merge them
                    j1, j2 = subset_idx
                    membership = ((subset[j1] >= 0).astype(int) +
                                  (subset[j2] >= 0).astype(int))[:-2]
                    if len(np.nonzero(membership == 2)[0]) == 0:  # merge
                        subset[j1][:-2] += (subset[j2][:-2] + 1)
                        subset[j1][-2:] += subset[j2][-2:]
                        subset[j1][-2] += connection_all[k][i][2]
                        subset = np.delete(subset, j2, 0)
                    else:  # otherwise treat it like the found == 1 case
                        subset[j1][indexB] = partBs[i]
                        subset[j1][-1] += 1
                        subset[j1][-2] += candidate[
                            partBs[i].astype(int), 2] + connection_all[k][i][2]

                # if partA belongs to no existing subset, start a new row
                elif not found and k < num_of_heatmaps - 1:
                    row = -1 * np.ones(num_of_OFFs_normal + 1)
                    row[indexA] = partAs[i]
                    row[indexB] = partBs[i]
                    row[-1] = 2
                    row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + \
                              connection_all[k][i][2]
                    subset = np.vstack([subset, row])

    # flag subset rows that have too few parts or too low an average score
    # (note: the actual deletion below is left commented out in this version)
    deleteIdx = []
    for i in range(len(subset)):
        if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
            deleteIdx.append(i)
    # subset = np.delete(subset, deleteIdx, axis=0)

    canvas = oriImgCD  # B,G,R order

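    # Keep only the highest-scoring peak for each reflector/part.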
    all_peaks_max_index = np.zeros(num_of_heatmaps - 1, dtype=int)
    for i in range(num_of_heatmaps - 1):
        if len(all_peaks[i]) > 0:
            max_value = 0
            for j in range(len(all_peaks[i])):
                if max_value < all_peaks[i][j][2]:
                    max_value = all_peaks[i][j][2]
                    all_peaks_max_index[i] = j

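    # Suppress duplicate reflectors: when the best peaks of two parts are
    # closer than 6 px, drop the lower-scoring one. (Each pair is visited
    # twice, so an index may be appended twice; clearing it twice is harmless.)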
    deleteIdReflector = []
    for i in range(num_of_heatmaps - 1):
        if len(all_peaks[i]) > 0:
            for j in range(num_of_heatmaps - 1):
                if i != j and len(all_peaks[j]) > 0:
                    vec = np.subtract(all_peaks[i][all_peaks_max_index[i]][:2],
                                      all_peaks[j][all_peaks_max_index[j]][:2])
                    norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
                    if norm < 6:
                        if (all_peaks[i][all_peaks_max_index[i]][2] >
                                all_peaks[j][all_peaks_max_index[j]][2]):
                            deleteIdReflector.append(j)
                        else:
                            deleteIdReflector.append(i)
    for idx in deleteIdReflector:
        all_peaks[idx] = []

    file_3d.write(str(frameIndex) + '\n')
    file_3d.write('NONE {  }\n')

    detected_contour_depth_values = []
    detected_contour_coordinates = []
    detected_rectangles = []
    detected_ids = []
    merged_sets = []

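    # For each surviving reflector: flood-fill from its best peak to isolate
    # the blob, extract its contour, and collect the in-range depth samples.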
    for i in range(num_of_heatmaps - 1):
        if len(all_peaks[i]) > 0:
            # cv2.circle(canvas, all_peaks[i][all_peaks_max_index[i]][0:2], 4, colors[i], thickness=4)
            # Copy the thresholded image.
            im_floodfill = canvas.copy()

            # Mask used for flood filling.
            # Note: the mask must be 2 pixels larger than the image in each dimension.
            h, w = canvas.shape[:2]
            mask = np.zeros((h + 2, w + 2), np.uint8)

            # Flood-fill starting from the reflector's best peak
            flood_return = cv2.floodFill(
                im_floodfill, mask, all_peaks[i][all_peaks_max_index[i]][0:2],
                [255, 255, 255])

            for j in range(len(detected_rectangles)):
                if (detected_ids[j] != i):
                    if isMergedRegion(detected_rectangles[j], flood_return[3]):

                        merged_sets.append([i, detected_ids[j]])
            detected_ids.append(i)
            detected_rectangles.append(flood_return[3])

            # Invert floodfilled image
            im_floodfill_inv = cv2.bitwise_not(im_floodfill)

            # Combine the two images to get the foreground.
            fill_image = canvas | im_floodfill_inv

            mask_gray = cv2.cvtColor(fill_image, cv2.COLOR_BGR2GRAY)
            # mask_gray = cv2.normalize(src=mask_gray, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)

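            # OpenCV 3.x API: findContours returns (image, contours, hierarchy);
            # OpenCV 4.x drops the first return value. contours[1], the second
            # contour, is assumed below to be the reflector blob outline.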
            im2, contours, hierarchy = cv2.findContours(
                mask_gray, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
            if len(contours) > 1:

                values = np.zeros((contours[1].shape[0]), dtype=float)
                for p in range(contours[1].shape[0]):
                    x = contours[1][p][0][0]
                    y = contours[1][p][0][1]
                    depth_value = rawDepth[y][x]
                    # keep samples with a plausible depth (presumably in
                    # millimeters, i.e. 1.0-2.5 m) that also lie inside the
                    # usable image region
                    if 1000 < depth_value < 2500 and 10 < y < 400 and 40 < x < 400:
                        values[p] = depth_value
                    else:
                        values[p] = 0
                    if values[p] > 0:
                        detected_contour_coordinates.append([y, x, values[p]])

                detected_contour_coordinates.sort(key=lambda c: c[2])  # sort by depth
                values[::-1].sort()  # in-place descending sort via a reversed view
                values = [x for x in values if x > 0]

                # `!= np.nan` is always True; NaN must be tested with np.isnan
                median_depth = np.median(values) if len(values) > 0 else float('nan')
                if not np.isnan(median_depth) and median_depth > 0:
                    detected_contour_depth_values.append(values)
                else:
                    del detected_ids[-1]
                    del detected_rectangles[-1]
            else:
                del detected_ids[-1]
                del detected_rectangles[-1]

    ## Clustering
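    # If two reflector regions merged during flood fill, split the pooled
    # contour samples back into two depth sets with 2-means clustering.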
    temp_detected = list(detected_ids)  # working copy

    for i in range(len(merged_sets)):
        if (merged_sets[i][0] in temp_detected) and (merged_sets[i][1]
                                                     in temp_detected):
            kmeans = KMeans(n_clusters=2,
                            random_state=0).fit(detected_contour_coordinates)

            # enumerate avoids the quadratic .index() lookup, which would also
            # mis-assign labels when coordinates repeat
            detected_contour_depth_values[detected_ids.index(
                merged_sets[i][0])] = [
                    coord[2]
                    for idx, coord in enumerate(detected_contour_coordinates)
                    if kmeans.labels_[idx] == 0
                ]
            detected_contour_depth_values[detected_ids.index(
                merged_sets[i][1])] = [
                    coord[2]
                    for idx, coord in enumerate(detected_contour_coordinates)
                    if kmeans.labels_[idx] == 1
                ]

            temp_detected.remove(merged_sets[i][0])
            temp_detected.remove(merged_sets[i][1])

    # spatial mapping from depthmap to 3D world using the intrinsic and extrinsic camera matrices
    # the extracted 3D points are stored in text files
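    # Presumably KRT*_x[u][v] = (u - cx) / fx and KRT*_y[u][v] = (v - cy) / fy,
    # so [KRT_x * d, KRT_y * d, d] is the camera-space point at depth d and
    # Ext* maps camera space to world space.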
    detected_index = 0
    for i in range(num_of_heatmaps - 1):
        if (i in detected_ids):
            depth = np.median(detected_contour_depth_values[detected_index])
            if "D4" in input_image:
                vec3 = [
                    KRT4_x[all_peaks[i][all_peaks_max_index[i]][0]][int(
                        all_peaks[i][all_peaks_max_index[i]][1])] * depth,
                    KRT4_y[all_peaks[i][all_peaks_max_index[i]][0]][int(
                        all_peaks[i][all_peaks_max_index[i]][1])] * depth,
                    depth, 1000.0
                ]
                vec3 = np.true_divide(vec3, 1000.0)
                final_vec3 = np.matmul(Ext4, vec3, out=None)

                file_3d.write(reflectors[i + 1] + ' { ' + str(final_vec3[0]) +
                              ' ' + str(final_vec3[1]) + ' ' +
                              str(final_vec3[2]) + ' ' + str(final_vec3[0]) +
                              ' ' + str(final_vec3[1]) + ' ' +
                              str(final_vec3[2]) + ' }\n')
            elif "D6" in input_image:
                vec3 = [
                    KRT6_x[all_peaks[i][all_peaks_max_index[i]][0]][int(
                        all_peaks[i][all_peaks_max_index[i]][1])] * depth,
                    KRT6_y[all_peaks[i][all_peaks_max_index[i]][0]][int(
                        all_peaks[i][all_peaks_max_index[i]][1])] * depth,
                    depth, 1000.0
                ]
                vec3 = np.true_divide(vec3, 1000.0)
                final_vec3 = np.matmul(Ext6, vec3, out=None)

                file_3d.write(reflectors[i + 1] + ' { ' + str(final_vec3[0]) +
                              ' ' + str(final_vec3[1]) + ' ' +
                              str(final_vec3[2]) + ' ' + str(final_vec3[0]) +
                              ' ' + str(final_vec3[1]) + ' ' +
                              str(final_vec3[2]) + ' }\n')
            elif "D8" in input_image:
                vec3 = [
                    KRT8_x[all_peaks[i][all_peaks_max_index[i]][0]][int(
                        all_peaks[i][all_peaks_max_index[i]][1])] * depth,
                    KRT8_y[all_peaks[i][all_peaks_max_index[i]][0]][int(
                        all_peaks[i][all_peaks_max_index[i]][1])] * depth,
                    depth, 1000.0
                ]
                vec3 = np.true_divide(vec3, 1000.0)
                final_vec3 = np.matmul(Ext8, vec3, out=None)

                file_3d.write(reflectors[i + 1] + ' { ' + str(final_vec3[0]) +
                              ' ' + str(final_vec3[1]) + ' ' +
                              str(final_vec3[2]) + ' ' + str(final_vec3[0]) +
                              ' ' + str(final_vec3[1]) + ' ' +
                              str(final_vec3[2]) + ' }\n')

            detected_index += 1
        else:
            file_3d.write(reflectors[i + 1] + ' {  }\n')
    # one extra trailing reflector entry is always written as empty
    file_3d.write(reflectors[num_of_heatmaps - 1] + ' {  }\n')

    file_3d.write('NOSE {  }\n')
    stickwidth = 4

    #### PAPER FIGURE
    # At this stage the estimates are overlaid on the depth images; the depth
    # images are produced by grayscaling the colorized images, NOT from the
    # raw depth map.
    canvas = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
    canvas = cv2.cvtColor(canvas, cv2.COLOR_GRAY2BGR)

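    # Draw each limb as a filled ellipse between the best peaks of its two
    # endpoint parts, alpha-blended onto the canvas.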
    for i in range(len(limbSeq)):
        if len(all_peaks[limbSeq[i][0] - 1]) > 0 and len(
                all_peaks[limbSeq[i][1] - 1]) > 0:
            cur_canvas = canvas.copy()
            Y = all_peaks[limbSeq[i][0] -
                          1][all_peaks_max_index[limbSeq[i][0] - 1]]
            X = all_peaks[limbSeq[i][1] -
                          1][all_peaks_max_index[limbSeq[i][1] - 1]]
            mX = (X[1] + Y[1]) / 2
            mY = (X[0] + Y[0]) / 2
            length = ((X[0] - Y[0])**2 + (X[1] - Y[1])**2)**0.5
            angle = math.degrees(math.atan2(X[1] - Y[1], X[0] - Y[0]))
            polygon = cv2.ellipse2Poly(
                (int(mY), int(mX)), (int(length / 2), stickwidth), int(angle),
                0, 360, 1)
            cv2.fillConvexPoly(cur_canvas, polygon, colors[limbSeq[i][0] - 1])
            canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)

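    # Label each surviving reflector with its 1-based index at its best peak.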
    for i in range(num_of_heatmaps - 1):
        if len(all_peaks[i]) > 0:
            cv2.putText(canvas,
                        str(i + 1),
                        all_peaks[i][all_peaks_max_index[i]][0:2],
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1.0,
                        colors[i],
                        thickness=2,
                        lineType=cv2.LINE_AA)

    cv2.imwrite(input_image_CD.replace(".png", "_processed.jpg"), canvas)
    return canvas