Example #1
def correlation_experiment(filename, language, embedding_function, name):
    data, character_encoder, tag_encoder, embedded_chars = read_data(
        filename, language)
    character_decoder = {v: k for k, v in character_encoder.items()}
    features = getphonfeatures()
    language_features = [
        np.array(features[character_decoder[f]])
        if character_decoder[f] in features else None
        for f in range(len(character_encoder))
    ]

    feature_similarity = get_similarity_matrix(language_features,
                                               embedded_chars)

    embeddings = embedding_function(data, character_encoder, embedded_chars,
                                    character_decoder)

    similarities = [
        get_similarity_matrix(m, embedded_chars) for m in embeddings
    ]
    rs = [
        correlation(feature_similarity, similarities[i])[0] for i in [0, 1, 2]
    ]
    print("%s %s:" % (language, name))
    print(" PEARSON R FOR EMBEDDING AND FEATURE REPR. SIMILARITIES:")
    print("  %s,DIM=5" % language, rs[0])
    print("  %s,DIM=15" % language, rs[1])
    print("  %s,DIM=30" % language, rs[2])

    random_rs = [[], [], []]
    for i in range(N):
        random_embeddings = [matshuf(m) for m in embeddings]
        random_similarities = [
            get_similarity_matrix(m, embedded_chars) for m in random_embeddings
        ]
        random_rs[0].append(
            correlation(feature_similarity, random_similarities[0])[0])
        random_rs[1].append(
            correlation(feature_similarity, random_similarities[1])[0])
        random_rs[2].append(
            correlation(feature_similarity, random_similarities[2])[0])

    print((" P=%.2f CONF. INTERVALS FOR PEARSON R OF RANDOM ASSIGNMENT OF\n" %
           P) + " EMBEDDINGS TO PHONEMES AND PHONETIC FEATURE DESCRIPTIONS:")
    civals = [confidence_interval_value(random_rs[i]) for i in [0, 1, 2]]
    print("  %s,DIM=5" % language, confidence_interval_value(random_rs[0]),
          check_r(civals[0], rs[0]), rs[0])
    print("  %s,DIM=15" % language, confidence_interval_value(random_rs[1]),
          check_r(civals[1], rs[1]), rs[1])
    print("  %s,DIM=30" % language, confidence_interval_value(random_rs[2]),
          check_r(civals[2], rs[2]), rs[2])
    print()
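The significance check above is a permutation test: each embedding matrix is shuffled N times, and the observed Pearson r is compared against a confidence interval built from the shuffled correlations. A minimal, self-contained sketch of that idea (the helper name, the sim_fn argument, and the flattening are assumptions; matshuf and confidence_interval_value from the example are not reproduced here):

import numpy as np
from scipy.stats import pearsonr

def permutation_interval(feature_sim, embedding, sim_fn, n_iter=1000, p=0.95):
    """Hypothetical sketch: shuffle the embedding rows n_iter times and return
    the central p-quantile interval of the resulting Pearson r values."""
    rng = np.random.default_rng(0)
    rs = []
    for _ in range(n_iter):
        shuffled = rng.permutation(embedding)  # row shuffle, like matshuf
        r, _ = pearsonr(feature_sim.ravel(), sim_fn(shuffled).ravel())
        rs.append(r)
    return np.quantile(rs, [(1 - p) / 2, 1 - (1 - p) / 2])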
Example #2
def test_4():
    """
    Query : Find correlation between math score and reading score for each race/ethnicity
    Dataset used - https://www.kaggle.com/spscientist/students-performance-in-exams
    Columns : race/ethnicity, math score, reading score 
    """
    table = pandas.read_csv('data/data_for_test_correlation/students_performance.csv')

    result_table, suggestions = correlation.correlation(table, 'math score', 'reading score',
                                                        dimensions=['race/ethnicity'])

    print(result_table.to_string())
    print(suggestions)

    expected_result_table = """  race/ethnicity  correlation between "math score" , "reading score"
0        group A                                           0.816310 
1        group B                                           0.824536 
2        group C                                           0.810855 
3        group D                                           0.793180 
4        group E                                           0.859600 """

    assert(expected_result_table == result_table.to_string())

    expected_suggestions = """[]"""

    assert(expected_suggestions == str(suggestions))
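For reference, the per-group Pearson r this test expects can be reproduced directly with pandas, without the correlation module under test (a sketch, assuming the same CSV):

import pandas

df = pandas.read_csv('data/data_for_test_correlation/students_performance.csv')
# Pearson correlation of the two score columns within each race/ethnicity group
by_group = df.groupby('race/ethnicity').apply(
    lambda g: g['math score'].corr(g['reading score']))
print(by_group)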
Example #3
def plot_transition(state, m_general_1, m_general_2, output_name):
    prob_general_1 = []
    prob_general_2 = []
    for action in actions:
        prob_general_1.append(m_general_1.get(action, 0) * 1.0 / m_general_1["total"])
        prob_general_2.append(m_general_2.get(action, 0) * 1.0 / m_general_2["total"])
    
    ind = np.arange(len(actions))
    width = 0.35
    names = ["ML", "MR", "MU", "MD", "ZI", "ZO", "TF", "TB", "REC", "REAN", "ROC", "ROAN"]

    fig = plt.figure()
    axis = fig.add_subplot(111)
    rect1 = axis.bar(ind, prob_general_1, width, facecolor = 'none', hatch = '//', label="Real")
    rect2 = axis.bar(ind+width, prob_general_2, width, facecolor='none', hatch='\\\\', label="Synthetic")
    x, y, z, ax, ay, az, last_op = state


    axis.axis(ymin=0, ymax=1)
    axis.set_ylabel('Probability of Action')
    axis.set_xlabel('Action')
    axis.set_title('x=%s, y=%s, z=%s, ax=%s, ay=%s, az=%s, A_p=%s, count: %d'%(x,y,z,ax,ay,az,last_op, m_general_1["total"]))
    axis.set_xticks(ind+width)
    axis.set_xticklabels(names, fontsize='small')
    axis.legend(loc="upper left")
    axis.text(5, 0.95, "correlation: %0.3f"%correlation(prob_general_1, prob_general_2), horizontalalignment='center')
    #autolabel(ax, rect1)
    #autolabel(ax, rect2)
    plt.savefig(output_name)
    plt.close()
Example #4
def is_pair_preferred(seq1, seq2):
    if seq1 is None or seq2 is None:
        print 'One or both of the sequences does not exist'
        return False
    if len(seq1) != len(seq2):
        print 'Sequences are not the same length'
        return False

    values_set = set(cor.correlation(seq1, seq2))

    if len(values_set) == 3:
        L = float(len(seq1))
        l = math.log(int(L+1), 2)
        m = 2 if l%2 == 0 else 1
        t = float(1 + 2**((l+m)/2))

        acceptable_values = [float("{0:.5f}".format(-t/L)), float("{0:.5f}".format(-1/L)), float("{0:.5f}".format((t-2)/L))]

        for value in values_set:
            if value not in acceptable_values:
                message = 'Autocorrelation of preferred pair can only have three values and these are: '
                for acceptable_value in acceptable_values:
                    message += '\n' + str(acceptable_value)
                message += '\nOne of existing values (' + str(value) + ') is not a valid one.'
                print message
                return False
        print 'Preferred pair was found. Gold codes will be generated shortly.'
        return True
    else:
        print 'Crosscorrelation function does not have three values: ' + str(len(values_set))
        return False
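As a concrete check of the three-valued condition: for sequences of length L = 31, l = log2(32) = 5 is odd, so m = 1 and t = 1 + 2**((5+1)/2) = 9, giving acceptable normalized values -9/31, -1/31, and 7/31. A small sketch of that arithmetic, using the same rounding as above:

import math

L = 31.0
l = math.log(int(L + 1), 2)         # 5.0
m = 2 if l % 2 == 0 else 1          # l is odd, so m = 1
t = float(1 + 2 ** ((l + m) / 2))   # 1 + 2**3 = 9.0
print([round(v, 5) for v in (-t / L, -1 / L, (t - 2) / L)])
# [-0.29032, -0.03226, 0.22581]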
Example #5
File: prostate.py Project: jayshonzs/ESL
def calculate_correlation(data):
    cols = data.shape[1]
    cm = np.zeros((cols,cols))
    for i in range(cols):
        for j in range(cols):
            cm[i,j] = correlation.correlation(data[:, i], data[:, j])
    np.savetxt('correlation.txt', cm)
    return cm
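Assuming correlation.correlation computes Pearson r for a pair of vectors, the same matrix is available in a single NumPy call (a sketch, under that assumption):

import numpy as np

cm = np.corrcoef(data, rowvar=False)  # treat columns as variables
np.savetxt('correlation.txt', cm)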
Example #6
def forward(self, x1, x2):
    y = correlation(x1,
                    x2,
                    pad_size=4,
                    kernel_size=1,
                    max_displacement=4,
                    stride1=1,
                    stride2=1)
    return y
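With these parameters the cost volume gets one output channel per sampled displacement, i.e. (2 * max_displacement / stride2 + 1)**2 = 81 channels here. A quick sanity check (hypothetical helper mirroring the arguments above):

def correlation_out_channels(max_displacement, stride2):
    # one channel per displacement step in each spatial direction
    d = 2 * (max_displacement // stride2) + 1
    return d * d

print(correlation_out_channels(4, 1))  # 81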
Example #7
def correlation_experiment(file, lan, embf, name):
    data, cencoder, tencoder, embchars = readdata(file, lan)
    cdecoder = {v: k for k, v in cencoder.items()}
    tdecoder = {v: k for k, v in tencoder.items()}
    features = getphonfeatures()
    lanfeatures = [
        np.array(features[cdecoder[f]]) if cdecoder[f] in features else None
        for f in range(len(cencoder))
    ]

    featsim = getsimmatrix(lanfeatures, len(cencoder), embchars)

    embeddings = embf(data, cencoder, embchars, tencoder, cdecoder, tdecoder,
                      lan)

    sims = [getsimmatrix(m, len(cencoder), embchars) for m in embeddings]
    rs = [correlation(featsim, sims[i])[0] for i in [0, 1, 2]]
    print("%s %s:" % (lan, name))
    print(" PEARSON R FOR EMBEDDING AND FEATURE REPR. SIMILARITIES:")
    print("  %s,DIM=5" % lan, rs[0])
    print("  %s,DIM=15" % lan, rs[1])
    print("  %s,DIM=30" % lan, rs[2])

    randrs = [[], [], []]
    for i in range(N):
        ranembeddings = [matshuf(m) for m in embeddings]
        ransims = [
            getsimmatrix(m, len(cencoder), embchars) for m in ranembeddings
        ]
        randrs[0].append(correlation(featsim, ransims[0])[0])
        randrs[1].append(correlation(featsim, ransims[1])[0])
        randrs[2].append(correlation(featsim, ransims[2])[0])

    print((" P=%.2f CONF. INTERVALS FOR PEARSON R OF RANDOM ASSIGNMENT OF\n" %
           P) + " EMBEDDINGS TO PHONEMES AND PHONETIC FEATURE DESCRIPTIONS:")
    civals = [confidenceival(randrs[i]) for i in [0, 1, 2]]
    print("  %s,DIM=5" % lan, confidenceival(randrs[0]),
          checkr(civals[0], rs[0]), rs[0])
    print("  %s,DIM=15" % lan, confidenceival(randrs[1]),
          checkr(civals[1], rs[1]), rs[1])
    print("  %s,DIM=30" % lan, confidenceival(randrs[2]),
          checkr(civals[2], rs[2]), rs[2])
    print()
Example #8
def test_2():
    """
    Query : Find correlation between NA_Sales & EU_Sales for each platform
    Dataset Used - https://www.kaggle.com/gregorut/videogamesales
    Columns :
        Platform - Platform of the games release (i.e. PC,PS4, etc.)
        NA_Sales - Sales in North America (in millions)
        EU_Sales - Sales in Europe (in millions)
    """
    table = pandas.read_csv('data/data_for_test_correlation/vgsales.csv')
    
    result_table, suggestions = correlation.correlation(table, 'NA_Sales', 'EU_Sales',
                                                        dimensions=['Platform'])

    print(result_table)
    print(suggestions)

    expected_result_table = """   Platform  correlation between "NA_Sales" , "EU_Sales"
0      2600                                     0.996554
1       3DO                                          NaN
2       3DS                                     0.945838
3        DC                                     0.797678
4        DS                                     0.871312
5        GB                                     0.705745
6       GBA                                     0.925557
7        GC                                     0.938478
8       GEN                                     0.973297
9        GG                                          NaN
10      N64                                     0.919057
11      NES                                     0.735203
12       NG                                          NaN
13       PC                                     0.404835
14     PCFX                                          NaN
15       PS                                     0.811515
16      PS2                                     0.654672
17      PS3                                     0.813370
18      PS4                                     0.793474
19      PSP                                     0.701001
20      PSV                                     0.755251
21      SAT                                     0.999610
22      SCD                                     1.000000
23     SNES                                     0.988964
24     TG16                                          NaN
25       WS                                          NaN
26      Wii                                     0.971428
27     WiiU                                     0.959718
28     X360                                     0.854582
29       XB                                     0.831869
30     XOne                                     0.779913"""

    assert(expected_result_table == result_table.to_string())
    
    expected_suggestions = """[]"""

    assert(expected_suggestions == str(suggestions))
Example #9
    def test_correlationHardCoded(self):
        with tf.device('/gpu:0'):
            with tf.Session('') as sess:
                batch_size = 1
                width = 64  # 3
                height = 48  # 3
                depth = 256
                stride_1 = 2
                stride_2 = 1
                kernel_size = 1
                max_displacement = 40  # 3
                padding = 40  # 3

                my_shape = [batch_size, width, height, depth]
                a = np.float32(np.ones(my_shape))
                b = np.float32(np.ones(my_shape))

                a = tf.convert_to_tensor(a, tf.float32)
                b = tf.convert_to_tensor(b, tf.float32)

                print("a : ", np.shape(a))
                print("b : ", np.shape(b))

                # ResizeMethod.NEAREST_NEIGHBOR
                a_resize = tf.image.resize_images(a, [(width * 2) - 1,
                                                      (height * 2) - 1],
                                                  method=1)
                # ResizeMethod.BILINEAR
                b_resize = tf.image.resize_images(b, [(width * 2) - 1,
                                                      (height * 2) - 1],
                                                  method=0)

                print("a_resize : ", np.shape(a_resize))
                print("b_resize : ", np.shape(b_resize))

                result = correlation(a_resize, b_resize, kernel_size,
                                     max_displacement, stride_1, stride_2,
                                     padding)  #.eval() # Sub-pixel

                print("result : ", np.shape(result))

                grad_a = tf.gradients(result[:, :, :, 0], a)
                print(grad_a)
                # print(np.shape(sess.run(grad_a)))

                grad_b = tf.gradients(result[:, :, :, 0], b)
                print(grad_b)
Example #10
File: lassoLAR.py Project: jayshonzs/ESL
def LAR(data, out, alpha=0.5):
    beta = []
    Y_means = np.ones(len(out)) * np.mean(out)
    residual = out - Y_means
    active_index = []
    origin_index = [Item(i, False) for i in range(len(data[0]))]
    B = None

    while True:
        max_correlation = -999999999
        item = None
        for it in origin_index:
            if it.active == True:
                continue
            co = correlation.correlation(data[:, it.index], residual)
            if co > max_correlation:
                max_correlation = co
                item = it
        if item == None:
            break
        item.active = True
        active_index.append(item)
        active_index = sorted(active_index, key=get_index)
        idx = 999999999
        for i in range(len(active_index)):
            if active_index[i].index == item.index:
                idx = i
                break
        beta.insert(idx, 0)
        indexes = [it.index for it in active_index]
        B = np.array(beta)
        d = direction(data[:, np.array(indexes)], out, B, residual)
        B = B + alpha * d
        # Drop coefficients that have shrunk to (near) zero. np.delete is used
        # because `del` cannot remove elements from a numpy array, and the loop
        # runs in reverse so deletions do not shift the remaining indices.
        for j in reversed(range(len(B))):
            if -0.001 < B[j] < 0.001:
                it = active_index[j]
                origin_index[it.index].active = False
                B = np.delete(B, j)
                del active_index[j]
        indexes = [it.index for it in active_index]
        residual = out - Y_means - data[:, np.array(indexes)].dot(B)
        if len(B) == 4:
            break
        beta = B.tolist()
    return B, Y_means, indexes
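The loop above is forward selection in the LAR style: the inactive predictor most correlated with the current residual joins the active set, the coefficients take a step of size alpha along the least-squares direction, and near-zero coefficients are pruned. A minimal numpy sketch of just the selection rule (hypothetical names; note that LAR proper ranks by absolute correlation, whereas the code above uses the signed value):

import numpy as np

def most_correlated(X, residual, active):
    """Return the index of the inactive column of X most correlated with residual."""
    best, best_r = None, -np.inf
    for j in range(X.shape[1]):
        if j in active:
            continue
        r = abs(np.corrcoef(X[:, j], residual)[0, 1])
        if r > best_r:
            best, best_r = j, r
    return best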
Example #11
def cnnmodel(frame1_xyz, frame1_rgb, frame2_xyz, frame2_rgb):
    frame1_rgb = tf.image.resize_images(frame1_rgb, [480, 640])
    frame2_rgb = tf.image.resize_images(frame2_rgb, [480, 640])

    frame1_feat_rgb, _ = get_network('resnet50',
                                     frame1_rgb,
                                     weight_decay=1e-5,
                                     is_training=True)
    frame2_feat_rgb, _ = get_network('resnet50',
                                     frame2_rgb,
                                     weight_decay=1e-5,
                                     is_training=True,
                                     reuse=True)

    frame1_feat = encoder(frame1_xyz)
    frame2_feat = encoder(frame2_xyz, reuse=True)

    cc_o = correlation(frame2_feat_rgb, frame1_feat_rgb, 1, rad, 1, 1, rad)
    return cc_o
Example #12
def main():
    dataset_path = "/home/zabot/Datasets/Lung-HCRP/Features/{}.csv"
    comb_fem_path = "/home/zabot/Documents/Codes/Mestrado/Spectra/archives/combinations_fem.txt"
    comb_dist_path = "/home/zabot/Documents/Codes/Mestrado/Spectra/archives/combinations_distances.txt"

    # Read combinations FEMs and Distances in order
    comb_fem = read_combinations(comb_fem_path)
    comb_dist = read_combinations(comb_dist_path)
    file = open("result-correlation.txt","w")
    
    for metric1, metric2 in comb_dist:
        for name_fem1, name_fem2 in comb_fem:
            data_fem1 = read_archives(dataset_path.format(name_fem1))
            data_fem2 = read_archives(dataset_path.format(name_fem2))

            corr = correlation(data_fem1, data_fem2, percentage=1, metric1=metric1, metric2=metric2)
            print(metric1, metric2, name_fem1, name_fem2, corr)
            file.write("{};{};{};{};{}\n".format(metric1,metric2,name_fem1,name_fem2,corr))
    file.close()
Example #13
def test_3():
    """
    Query : Find correlation between math score and reading score
    Dataset used - https://www.kaggle.com/spscientist/students-performance-in-exams
    Columns : race/ethnicity, math score, reading score 
    """
    table = pandas.read_csv('data/data_for_test_correlation/students_performance.csv')

    result_table, suggestions = correlation.correlation(table, 'math score', 'reading score')

    print(result_table.to_string())
    print(suggestions)

    expected_result_table = """   correlation between "math score" , "reading score"
0                                            0.81758 """

    assert(expected_result_table == result_table.to_string())

    expected_suggestions = """[]"""

    assert(expected_suggestions == str(suggestions))
Example #14
    def test_check_output(self):
        #x_shape = (1, 196, 3, 3)
        np.random.seed(13)
        np.set_printoptions(threshold=np.inf)
        x_shape = (2, 10, 3, 3)
        x_type = 'float32'
        x1 = fluid.layers.data(name='x1',
                               shape=x_shape,
                               dtype=x_type,
                               append_batch_size=False)
        x2 = fluid.layers.data(name='x2',
                               shape=x_shape,
                               dtype=x_type,
                               append_batch_size=False)

        x1_np = np.random.randn(2, 3, 4, 5).astype(x_type)
        x2_np = np.random.randn(2, 3, 4, 5).astype(x_type)
        out_np = corr(x1_np,
                      x2_np,
                      pad_size=4,
                      kernel_size=1,
                      max_displacement=4,
                      stride1=1,
                      stride2=1)

        out = correlation(x1,
                          x2,
                          pad_size=4,
                          kernel_size=1,
                          max_displacement=4,
                          stride1=1,
                          stride2=1)

        place = fluid.CUDAPlace(0)
        exe = fluid.Executor(place)
        res = exe.run(feed={'x1': x1_np, 'x2': x2_np}, fetch_list=[out.name])

        self.assertTrue(np.allclose(res[0], out_np))
Example #15
def test_1():
    """
    Query : Find correlation between NA_Sales & EU_Sales
    Dataset Used - https://www.kaggle.com/gregorut/videogamesales
    Columns :
        Platform - Platform of the games release (i.e. PC,PS4, etc.)
        NA_Sales - Sales in North America (in millions)
        EU_Sales - Sales in Europe (in millions)
    """
    table = pandas.read_csv('data/data_for_test_correlation/vgsales.csv')
    
    result_table, suggestions = correlation.correlation(table, 'NA_Sales', 'EU_Sales')

    print(result_table)
    print(suggestions)

    expected_result_table = """   correlation between "NA_Sales" , "EU_Sales"
0                                     0.767727"""

    assert(expected_result_table == result_table.to_string())
    
    expected_suggestions = """[]"""

    assert(expected_suggestions == str(suggestions))
Example #16
def main():
    '''
    Main function including:
     - Arg parser generation
     - Graph generation
     - Thesaurus generation
     - Optional thesaurus test
     - Writing the thesaurus
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument("data_file", help='A .outmalt data file')
    parser.add_argument("-t",
                        "--theory",
                        default=None,
                        help='A compare file in the correct format.')
    parser.add_argument("--thesaurus",
                        type=int,
                        default=1000,
                        help='The size of the thesaurus.')
    parser.add_argument("--absolute",
                        action='store_true',
                        help='Use absolute mode.')
    parser.add_argument("-v",
                        "--verbose",
                        action='store_true',
                        help='Activate maximum detail mode.')
    parser.add_argument(
        "-l",
        "--limit",
        type=int,
        default=1000000,
        help='The number of lexemes processed before cleaning the graph.')
    parser.add_argument(
        "-m",
        "--minimum_limit",
        type=int,
        default=10,
        help=
        'The number of occurrences needed to avoid deletion when the graph is cleaned.'
    )
    parser.add_argument(
        "-w",
        "--write",
        default=None,
        help='The path to the file where the thesaurus will be written.')
    args = parser.parse_args(sys.argv[1:])

    content = splitter(file_manager.read(args.data_file))
    thesau = thesaurus(
        tree_creator.token_list(content, args.limit, args.minimum_limit,
                                args.verbose))
    print("Graph has been generated. It has", len(thesau.corpus),
          "nodes inside.")

    mode = 'r'
    if args.absolute:
        mode = 'a'
    result = thesau.usable({'NC'}, thesau.cosine, thesau.PMI, mode,
                           args.thesaurus, args.verbose)
    print("The thesaurus has been generated.")

    if args.theory is not None:
        c, p = correlation(
            generate_compare(splitter(file_manager.read(args.theory))), result)
        print("With a cover of", p, "%, there is a correlation score of", c)

    if args.write:
        with open(args.write, 'w') as file:
            json.dump(result, file)
    else:
        print(result)
Example #17
                                      dist_coefs['b_left'])
print "Doing Stereo Calibration for camera 14"
(R14, T14) = calibration.StereoCalibration(
    objpoints, imgpoints['t_left'], imgpoints['b_right'], cam_mats['t_left'],
    cam_mats['b_right'], dist_coefs['t_left'], dist_coefs['b_right'])
'''Read Images'''

t_left = cv2.imread(sample + '_t_left.jpeg')
t_right = cv2.imread(sample + '_t_right.jpeg')
b_left = cv2.imread(sample + '_b_left.jpeg')
b_right = cv2.imread(sample + '_b_right.jpeg')
'''Zmap'''

print "computing Z values"
Zmap = []
correlation = correlation()
for v1 in range(window_size, 480 - window_size, pixel_step):
    for u1 in range(window_size, 720 - window_size, pixel_step):
        '''80mm'''
        Z = range(80, 3081, 80)
        (correlation12, u2max_right, v2max_right) = correlation.correlation(
            R12, T12, t_left, t_right, cam_mats['t_left'], cam_mats['t_right'],
            u1, v1, window_size, Z)
        (correlation13, u2max_downl, v2max_downl) = correlation.correlation(
            R13, T13, t_left, b_left, cam_mats['t_left'], cam_mats['b_left'],
            u1, v1, window_size, Z)
        (correlation14, u2max_downr, v2max_downr) = correlation.correlation(
            R14, T14, t_left, b_right, cam_mats['t_left'], cam_mats['b_right'],
            u1, v1, window_size, Z)

        correlationAll = np.array(
Example #18
def least_squares_fit(x, y):
    """given training values for x and y, find the least-squares values of 
    alpha and beta"""
    beta = correlation(x, y) * standard_deviation(y) / standard_deviation(x)
    alpha = mean(y) - beta * mean(x)
    return alpha, beta
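These are the closed-form simple-regression estimates: beta = r * s_y / s_x and alpha = mean(y) - beta * mean(x), which minimize sum_i (y_i - (alpha + beta * x_i))**2. A quick usage sketch (assuming the mean, standard_deviation, and correlation helpers from the same statistics module):

x = [1, 2, 3, 4, 5]
y = [2, 4, 5, 4, 5]
alpha, beta = least_squares_fit(x, y)
print(alpha, beta)  # alpha = 2.2, beta = 0.6 for this data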
Example #19
def hello_http(request):
    """HTTP Cloud Function.
    Args:
        request (flask.Request): The request object.
        <http://flask.pocoo.org/docs/1.0/api/#flask.Request>
    Returns:
        The response text, or any set of values that can be turned into a
        Response object using `make_response`
        <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>.
    """
    request_json = request.get_json(silent=True)

    request_args = request.args

    # extracting the intent parameters from the json
    intent = _get_value(request_json, 'intent')
    table = _get_value(request_json, 'table')
    metric = _get_value(request_json, 'metric')
    dimensions = _get_value(request_json, 'dimensions')
    summary_operator = _get_value(request_json, 'summaryOperator')
    slices = _get_value(request_json, 'slices')
    is_asc = _get_value(request_json, 'isAsc')
    k = _get_value(request_json, 'topKLimit')
    slice_comparision_arg = _get_value(request_json, 'comparisonValue')
    time_comparision_arg = _get_value(request_json, 'compareDateRange')
    date = _get_value(request_json, 'dateRange')
    time_granularity = _get_value(request_json, 'timeGranularity')
    correlation_metrics = _get_value(request_json, 'correlationMetrics')
    rangeA1Notation = _get_value(request_json, 'rangeA1Notation')

    # Converting the list of lists into a pandas dataframe
    # (the first row of `table` holds the column headers).
    query_table = []
    for row in range(1, len(table)):
        query_table.append(table[row])
    query_table_dataframe = pandas.DataFrame(query_table, columns=table[0])

    (all_dimensions,
     all_metrics) = _list_all_dimensions_metrics(query_table_dataframe,
                                                 dimensions, metric)

    # Remove empty columns
    query_table_dataframe = remove_empty_columns(query_table_dataframe)

    # Remove duplicate named columns
    query_table_dataframe = remove_duplicate_named_columns(
        query_table_dataframe)

    # Converting the variables that denote the
    # date range into the desired format.
    date_column_name = None
    date_range = None
    day_first = None
    if date is not None:
        date_columns = request_json['dateColumns']
        date_column_name = date['dateCol']
        date_range = (date['dateStart'], date['dateEnd'])
        day_first = date_columns[date_column_name]['day_first']

    # Converting the Slices passed in the json into a
    # list of tuples (col, operator, val)
    slices_list = None
    if slices is not None:
        slices_list = []
        for item in slices:
            val = item['sliceVal']
            col = item['sliceCol']
            operator = _str_to_filter_enum(item['sliceOp'])
            slices_list.append((col, operator, val))

    if dimensions == 'null':
        dimensions = None

    if slice_comparision_arg is not None:
        slice_compare_column = slice_comparision_arg['comparisonColumn']
        slice1 = slice_comparision_arg['slice1']
        slice2 = slice_comparision_arg['slice2']

    if time_comparision_arg is not None:
        time_compare_column = time_comparision_arg['dateCol']
        date_range1 = (time_comparision_arg['dateStart1'],
                       time_comparision_arg['dateEnd1'])
        date_range2 = (time_comparision_arg['dateStart2'],
                       time_comparision_arg['dateEnd2'])
        day_first = request_json['dateColumns'][time_compare_column][
            'day_first']

    if metric == 'null':
        metric = None

    summary_operator = _str_to_summary_operator_enum(summary_operator)

    time_granularity = _str_to_time_granularity_enum(time_granularity)

    suggestions = []

    wrong_points_suggestion = wrong_points.wrong_points(query_table_dataframe)

    if intent == 'show':
        query_table_dataframe = show(query_table_dataframe,
                                     slices=slices_list,
                                     metric=metric,
                                     dimensions=dimensions,
                                     summary_operator=summary_operator,
                                     date_column_name=date_column_name,
                                     day_first=day_first,
                                     date_range=date_range)

        if summary_operator == enums.SummaryOperators.MEAN:
            suggestions.append(get_hardcoded_mean_vs_median_suggestion())

        updated_suggestions = []
        for suggestion in suggestions:
            updated_suggestion = suggestion
            if 'change_list' in suggestion.keys():
                updated_suggestion['json'] = func(request_json,
                                                  suggestion['change_list'])
            updated_suggestions.append(updated_suggestion)

        suggestions = updated_suggestions

    elif intent == 'topk':
        query_result = topk.topk(query_table_dataframe,
                                 metric,
                                 dimensions,
                                 is_asc,
                                 k,
                                 summary_operator=summary_operator,
                                 slices=slices_list,
                                 date_column_name=date_column_name,
                                 day_first=day_first,
                                 date_range=date_range)
        query_table_dataframe = query_result[0]
        suggestions = query_result[1]
        updated_suggestions = []
        for suggestion in suggestions:
            updated_suggestion = suggestion
            if 'change_list' in suggestion.keys():
                updated_suggestion['json'] = func(request_json,
                                                  suggestion['change_list'])
            updated_suggestion['oversight'] = updated_suggestion[
                'oversight'].name
            updated_suggestions.append(updated_suggestion)

        suggestions = updated_suggestions

    elif intent == 'slice_compare':
        query_result = slice_compare.slice_compare(
            query_table_dataframe,
            metric,
            all_dimensions,
            all_metrics,
            slice_compare_column,
            slice1,
            slice2,
            summary_operator,
            date_column_name=date_column_name,
            date_range=date_range,
            day_first=day_first,
            slices=slices_list,
            dimensions=dimensions)
        query_table_dataframe = query_result[0]
        suggestions = query_result[1]
        updated_suggestions = []

        for suggestion in suggestions:
            updated_suggestion = suggestion
            if 'change_list' in suggestion.keys():
                updated_suggestion['json'] = func(request_json,
                                                  suggestion['change_list'])
            updated_suggestion['oversight'] = updated_suggestion[
                'oversight'].name
            updated_suggestions.append(updated_suggestion)

        suggestions = updated_suggestions

    elif intent == 'time_compare':
        query_result = time_compare.time_compare(query_table_dataframe,
                                                 metric,
                                                 all_dimensions,
                                                 time_compare_column,
                                                 date_range1,
                                                 date_range2,
                                                 day_first,
                                                 summary_operator,
                                                 slices=slices_list,
                                                 dimensions=dimensions)
        query_table_dataframe = query_result[0]
        suggestions = query_result[1]
        updated_suggestions = []

        for suggestion in suggestions:
            updated_suggestion = suggestion
            if 'change_list' in suggestion.keys():
                updated_suggestion['json'] = func(request_json,
                                                  suggestion['change_list'])
            updated_suggestion['oversight'] = updated_suggestion[
                'oversight'].name
            updated_suggestions.append(updated_suggestion)

        suggestions = updated_suggestions

    elif intent == 'correlation':
        query_table_dataframe = correlation.correlation(
            query_table_dataframe,
            correlation_metrics['metric1'],
            correlation_metrics['metric2'],
            slices=slices_list,
            date_column_name=date_column_name,
            day_first=day_first,
            date_range=date_range,
            dimensions=dimensions)

    elif intent == 'trend':
        query_table_dataframe = trend.trend(query_table_dataframe,
                                            metric,
                                            time_granularity,
                                            summary_operator,
                                            date_column_name=date_column_name,
                                            day_first=day_first,
                                            date_range=date_range,
                                            slices=slices_list)

    else:
        raise Exception("Intent name does not match")

    if wrong_points_suggestion is not None:
        wrong_points_suggestion['oversight'] = wrong_points_suggestion[
            'oversight'].name
        suggestions = [wrong_points_suggestion] + suggestions

    final_table = []

    # converting into a json object and returning
    final_table = query_table_dataframe.values.tolist()
    final_table.insert(0, list(query_table_dataframe.columns.values))

    json_ret = {'outputTable': final_table, 'suggestions': suggestions}

    if rangeA1Notation is not None:
        all_row_labels = _get_all_row_labels(rangeA1Notation)
        all_column_labels = _get_all_column_labels(rangeA1Notation)
        cheader_to_clabel = _get_cheader_to_clabel(table, all_column_labels)

        if slices_list is not None:
            json_ret[
                'slicing_passed_list'] = insert_as_column.insert_as_column_show(
                    table,
                    cheader_to_clabel,
                    all_row_labels[0],
                    all_row_labels[-1],
                    all_column_labels[0],
                    all_column_labels[-1],
                    slices=slices_list)

        if intent == 'topk' and summary_operator is None:
            filter_column_label_number = _get_number_of_column_label(
                all_column_labels[-1]) + 1
            filter_column_label = _get_label_from_number(
                filter_column_label_number)

            json_ret[
                'list_topk_indices'] = insert_as_column.insert_as_column_topk_column(
                    table, cheader_to_clabel, all_row_labels[0],
                    all_row_labels[-1], all_column_labels[0],
                    all_column_labels[-1], filter_column_label, metric, is_asc,
                    k)

    json_string = json.dumps(json_ret)
    return json_string
Example #20
HEIGHT = 2
WIDTH = 2
CHANNELS = 1

NEIGHBORHOOD_SIZE = 2
MAX_DISPLACEMENT = int(math.ceil(NEIGHBORHOOD_SIZE / 2.0))
STRIDE_2 = 1

test_forward = False

if test_forward:
  with tf.Session('') as sess:
    with tf.device('/gpu:0'):
      fmA = tf.ones((BATCH_SIZE, HEIGHT, WIDTH, CHANNELS), dtype=tf.float32)
      fmB = tf.convert_to_tensor(np.random.randint(1,5, size=(BATCH_SIZE, HEIGHT, WIDTH, CHANNELS)), dtype=tf.float32)
      corr = correlation(fmA,fmB,1,1,1,1,1) # input_a, input_b, kernel_size, max_displacement, stride_1, stride_2, padding
      sess.run(tf.initialize_all_variables())
      corr_, fmA_, fmB_= sess.run([corr,fmA,fmB])
      print(corr_[0])
      print(fmA_[0][:,:,0])
      print(fmB_[0][:,:,0]) 
else:
  with tf.Session('') as sess:
    fmA1 = np.ones((BATCH_SIZE, HEIGHT, WIDTH, CHANNELS)).astype('float32')
    fmB1 = np.random.randn(BATCH_SIZE, HEIGHT, WIDTH, CHANNELS).astype('float32')
    with tf.device('/gpu:0'):
      fmA = tf.constant(fmA1)   
      fmB = tf.Variable(fmB1)
      corr = correlation(fmA,fmB,1,1,1,1,1)
      loss = tf.reduce_sum(corr)
      train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)
Example #21
# Plot of the rise in temperature in Brazil over the years
figBrasil = graficoBrasil(temperaturas_globais_paises)
figBrasil.show()

# Plot of the top countries with the largest difference between mean temperature over the years and maximum temperature (i.e. the largest increase)
figDiferenca = graficoDiferencaGeral(temperaturas_globais_cidades)
figDiferenca.show()

# Check which features are most related to the target
featureSelection(temperaturas_globais)
regression(temperaturas_globais)
regressionMultipleParameters(temperaturas_globais)

# Regressions for the annual mean global temperature
modelo = regressao_ano_temp_media(temperaturas_globais)
previsao_temp_media_futura(temperaturas_globais, modelo, 2101)

modelo_brasil = regressao_ano_temp_media_BRASIL(temperaturas_globais_cidades)
previsao_temp_media_futura_BRASIL(temperaturas_globais_cidades, modelo_brasil,
                                  2101)

# Correlation of mean temperature with land-and-ocean temperature
correlation(temperaturas_globais)
predictLandAndOcean(
    temperaturas_globais, 5.518
)  # it might be nicer to use a previously predicted value here instead of hardcoding one
modelo = regressionLandToOcean(temperaturas_globais)
graficoPredicao(temperaturas_globais, modelo, 20)
Example #22
def net_structure(img1, img2):
    with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
                        # He (aka MSRA) weight initialization
                        weights_initializer=slim.variance_scaling_initializer(),
                        activation_fn=LeakyReLU,
                        # We will do our own padding to match the original Caffe code
                        padding='VALID'):
        weights_regularizer = slim.l2_regularizer(weight_decay)
        with slim.arg_scope([slim.conv2d], weights_regularizer=weights_regularizer):
            with slim.arg_scope([slim.conv2d], stride=2):
                conv_a_1 = slim.conv2d(pad(img1, 3), 64, 7, scope='conv1')
                conv_a_2 = slim.conv2d(pad(conv_a_1, 2), 128, 5, scope='conv2')
                conv_a_3 = slim.conv2d(pad(conv_a_2, 2), 256, 5, scope='conv3')

                conv_b_1 = slim.conv2d(pad(img2, 3), 64, 7, scope='conv1', reuse=True)
                conv_b_2 = slim.conv2d(pad(conv_b_1, 2), 128, 5, scope='conv2', reuse=True)
                conv_b_3 = slim.conv2d(pad(conv_b_2, 2), 256, 5, scope='conv3', reuse=True)

                # Compute cross correlation with leaky relu activation
                cc = correlation.correlation(conv_a_3, conv_b_3, 1, 20, 1, 2, 20)
                cc_relu = LeakyReLU(cc)

            # Combine cross correlation results with convolution of feature map A
            netA_conv = slim.conv2d(conv_a_3, 32, 1, scope='conv_redir')
            # Concatenate along the channels axis
            net = tf.concat([netA_conv, cc_relu], axis=3)

            conv3_1 = slim.conv2d(pad(net), 256, 3, scope='conv3_1')
            with slim.arg_scope([slim.conv2d], num_outputs=512, kernel_size=3):
                conv4 = slim.conv2d(pad(conv3_1), stride=2, scope='conv4')
                conv4_1 = slim.conv2d(pad(conv4), scope='conv4_1')
                conv5 = slim.conv2d(pad(conv4_1), stride=2, scope='conv5')
                conv5_1 = slim.conv2d(pad(conv5), scope='conv5_1')
            conv6 = slim.conv2d(pad(conv5_1), 1024, 3, stride=2, scope='conv6')
            conv6_1 = slim.conv2d(pad(conv6), 1024, 3, scope='conv6_1')

            """ START: Refinement Network """
            with slim.arg_scope([slim.conv2d_transpose], biases_initializer=None):
                predict_flow6 = slim.conv2d(pad(conv6_1), 2, 3,
                                            scope='predict_flow6',
                                            activation_fn=None)
                deconv5 = antipad(slim.conv2d_transpose(conv6_1, 512, 4,
                                                        stride=2,
                                                        scope='deconv5'))
                upsample_flow6to5 = antipad(slim.conv2d_transpose(predict_flow6, 2, 4,
                                                                  stride=2,
                                                                  scope='upsample_flow6to5',
                                                                  activation_fn=None))
                concat5 = tf.concat([conv5_1, deconv5, upsample_flow6to5], axis=3)

                predict_flow5 = slim.conv2d(pad(concat5), 2, 3,
                                            scope='predict_flow5',
                                            activation_fn=None)
                deconv4 = antipad(slim.conv2d_transpose(concat5, 256, 4,
                                                        stride=2,
                                                        scope='deconv4'))
                upsample_flow5to4 = antipad(slim.conv2d_transpose(predict_flow5, 2, 4,
                                                                  stride=2,
                                                                  scope='upsample_flow5to4',
                                                                  activation_fn=None))
                concat4 = tf.concat([conv4_1, deconv4, upsample_flow5to4], axis=3)

                predict_flow4 = slim.conv2d(pad(concat4), 2, 3,
                                            scope='predict_flow4',
                                            activation_fn=None)
                deconv3 = antipad(slim.conv2d_transpose(concat4, 128, 4,
                                                        stride=2,
                                                        scope='deconv3'))
                upsample_flow4to3 = antipad(slim.conv2d_transpose(predict_flow4, 2, 4,
                                                                  stride=2,
                                                                  scope='upsample_flow4to3',
                                                                  activation_fn=None))
                concat3 = tf.concat([conv3_1, deconv3, upsample_flow4to3], axis=3)

                predict_flow3 = slim.conv2d(pad(concat3), 2, 3,
                                            scope='predict_flow3',
                                            activation_fn=None)
                deconv2 = antipad(slim.conv2d_transpose(concat3, 64, 4,
                                                        stride=2,
                                                        scope='deconv2'))
                upsample_flow3to2 = antipad(slim.conv2d_transpose(predict_flow3, 2, 4,
                                                                  stride=2,
                                                                  scope='upsample_flow3to2',
                                                                  activation_fn=None))
                concat2 = tf.concat([conv_a_2, deconv2, upsample_flow3to2], axis=3)

                predict_flow2 = slim.conv2d(pad(concat2), 2, 3,
                                            scope='predict_flow2',
                                            activation_fn=None)
            """ END: Refinement Network """

            '''new loss'''
            # target_height, target_width = int(predict_flow2.shape[1].value), int(predict_flow2.shape[2].value)
            # predict_flow6 = tf.image.resize_bilinear(predict_flow6,
            #                                          tf.stack([target_height, target_width]),
            #                                          align_corners=True)
            # predict_flow5 = tf.image.resize_bilinear(predict_flow5,
            #                                          tf.stack([target_height, target_width]),
            #                                          align_corners=True)
            # predict_flow4 = tf.image.resize_bilinear(predict_flow4,
            #                                          tf.stack([target_height, target_width]),
            #                                          align_corners=True)
            # predict_flow3 = tf.image.resize_bilinear(predict_flow3,
            #                                          tf.stack([target_height, target_width]),
            #                                          align_corners=True)
            # predict = tf.concat([predict_flow5, predict_flow4, predict_flow3, predict_flow2], axis=3)
            # flow = predict * 20.0
            # flow_temp0 = slim.conv2d(pad(predict), num_outputs=2, kernel_size=2, stride=1, scope='flow_temp0')
            # flow_temp = tf.image.resize_bilinear(flow_temp0,
            #                                      tf.stack([img_height, img_width]),
            #                                      align_corners=True)
            # flow = flow_temp * 20.0

            flow = predict_flow2 * 20.0
            # TODO: Look at Accum (train) or Resample (deploy) to see if we need to do something different
            flow = tf.image.resize_bilinear(flow,
                                            tf.stack([img_height, img_width]),
                                            align_corners=True)

            return {
                'predict_flow6': predict_flow6,
                'predict_flow5': predict_flow5,
                'predict_flow4': predict_flow4,
                'predict_flow3': predict_flow3,
                'predict_flow2': predict_flow2,
                'flow': flow,
            }
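In the FlowNet-style call correlation.correlation(conv_a_3, conv_b_3, 1, 20, 1, 2, 20) above, the positional arguments appear to follow the (input_a, input_b, kernel_size, max_displacement, stride_1, stride_2, padding) convention noted in Example #20. With max_displacement 20 and stride_2 of 2, the displacement grid is 21 x 21, so the cost volume has 441 channels; concatenating the 32-channel conv_redir branch gives conv3_1 a 473-channel input. A small arithmetic sketch:

max_displacement, stride_2 = 20, 2
d = 2 * (max_displacement // stride_2) + 1
print(d * d)       # 441 correlation channels
print(d * d + 32)  # 473 channels entering conv3_1 after the concat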
Example #23
    def test_correlationHardCoded(self):
        with tf.device('/gpu:0'):
            with tf.Session(''):
                batch_size = 1
                width = 40
                height = 56
                depth = 256
                stride_1 = 1.0

                stride_2 = 0.5
                # 0.5 # 2.0;
                #stride_2 = 2.0;
                #stride_2 = 2.0;

                #max_displacement = 5
                max_displacement = 20
                #max_displacement = 80 # 40

                # sigma = -2.0; # test for error
                sigma = 1.0
                kernel_size = 3

                #padding = 6
                padding = 21
                #padding = 81 # 41

                #expected_depth = (2*int(max_displacement/stride)+1)**2;
                my_shape = (batch_size, width, height, depth)

                a_rand = np.random.randint(10, size=my_shape)
                b_rand = np.random.randint(10, size=my_shape)

                # (input_a, input_b, kernel_size, max_displacement, stride_1, stride_2, padding, sigma)
                print("##########")
                print("kernel_size : ", kernel_size)
                print("max_displacement : ", max_displacement)
                print("stride_1 : ", stride_1)
                print("stride_2 : ", stride_2)
                print("padding : ", padding)
                print("##########\n")
                print("a_rand : ", np.shape(a_rand))
                print("b_rand : ", np.shape(b_rand))
                #result = correlation(np.ones(my_shape), np.ones(my_shape), kernel_size, max_displacement, stride_1, stride_2, padding, sigma).eval() # Gaussian
                result = correlation(np.ones(my_shape), np.ones(my_shape),
                                     kernel_size, max_displacement, stride_1,
                                     stride_2, padding).eval()  # Sub-pixel
                print("result : ", np.shape(result))
                print("\n\n")

                #result = correlation(np.ones(my_shape), np.ones(my_shape), stride=stride, max_displacement=max_displacement, kernel_size=kernel_size, sigma=sigma).eval()

                # result = cl.corr(a_rand, b_rand, kernel_size=kernel_size, sigma=sigma).eval()
                # result = cl.corr(np.ones(my_shape), np.ones(my_shape)).eval()

                #self.assertEqual(result.shape[0], my_shape[0])
                #self.assertEqual(result.shape[1], my_shape[1])
                #self.assertEqual(result.shape[2], my_shape[2])
                #self.assertEqual(result.shape[3], expected_depth)

                # print(result[0, 0, 0, 220])
                # self.assertEqual(result[0, 0, 0, 220], 1)

                # print(result[0, 0, 0, 0])
                # self.assertEqual(result[0, 0, 0, 0], 0)

                print("HardCoded test")
                #print(result, end="\n\n")

                # print(np.ones(my_shape))
                # print(result[0, :, :, 0], end="\n\n")
                #print(result[0, :, :, 1], end="\n\n")
                #print(result[0, :, :, 2], end="\n\n")
                #print(result[0, :, :, 7], end="\n\n")
                ##print(result[0, :, :, 24], end="\n\n")
                #print(result[0, :, :, 25], end="\n\n")
                '''print(result[0, :, :, 41], end="\n\n")
Example #24
def testClass(self):
    correlation(rangen=10, label='da')
    correlation(rangen=5, label='A')
    correlation(rangen=5)
    correlation(label='A')
    correlation()
Example #25
from correlation import correlation

import numpy as np

if __name__ == '__main__':

    series1 = np.loadtxt('test_series1.txt')
    series2 = np.loadtxt('test_series2.txt')

    print 'Call the cslick correlation function as follows:'
    print 'cslick=correlation(x1,y1,x2,y2,hc)'
    print '''where (x1,y1) and (x2,y2) are two differently and unevenly sampled series to be correlated,
in this implementation these are numpy arrays. Optional argument hc (default=0.4) is a coefficient to
tune how closely data from the two series must be to be included in the calculation.\n'''

    print "test_series1.txt"
    for row in series1[:]:
        print "{:1.6f} {:1.6f}".format(row[0], row[1])
    print "\ntest_series2.txt"
    for row in series2[:]:
        print "{:1.6f} {:1.6f}".format(row[0], row[1])

    cslick = correlation(series1[:, 1],
                         series2[:, 1],
                         series1[:, 0],
                         series2[:, 0],
                         hc=0.4)

    print '\nCorrelation: {}'.format(cslick)
Example #26
def cnnmodel(frame1_xyz,frame1_rgb,frame2_xyz,frame2_rgb):
  #frame1_input = tf.concat([frame1_xyz,frame1_rgb],3)
  #frame2_input = tf.concat([frame2_xyz,frame2_rgb],3)

  frame1_feat_rgb = encoder_rgb(frame1_rgb)
  frame2_feat_rgb = encoder_rgb(frame2_rgb,reuse=True)

  frame1_feat = encoder(frame1_xyz)
  frame2_feat = encoder(frame2_xyz,reuse=True)

  cc = correlation(frame2_feat_rgb,frame1_feat_rgb,1,rad,1,1,rad)
 
  cc_relu = LeakyReLU(cc)
 
  frame1_feat = tf.transpose(frame1_feat,[0,3,1,2])
 
  frame1_feat_padded = tf.pad(frame1_feat,paddings=[[0,0],[0,0],[rad,rad],[rad,rad]])
  
  frame1_list = []
  for i in xrange(30):
    for j in xrange(40):
      tmp = frame1_feat_padded[:,:,0+i:2*rad+1+i,0+j:2*rad+1+j]
      tmp = tf.reshape(tmp,[-1,64,dia * dia])
      frame1_list.append(tmp)

  frame1_list = tf.stack(frame1_list,axis=2)
  frame1_list = tf.transpose(frame1_list,[0,2,3,1])
  cc_relu = tf.reshape(cc_relu,[-1,30*40,dia * dia,1])

  frame1_list = frame1_list * cc_relu
  frame1_list = tf.nn.max_pool(frame1_list,ksize=[1,1,dia * dia,1],strides=[1,1,dia * dia,1],padding='VALID')
  frame1_list = tf.reshape(frame1_list,(-1,30,40,64))
  
  x = tf.concat([frame2_feat,frame1_list],3)

  x=tflearn.layers.conv.conv_2d(x,64,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
  x=tflearn.layers.conv.conv_2d(x,64,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
  x2 = x
  x=tflearn.layers.conv.conv_2d(x,128,(5,5),strides=2,activation='relu',weight_decay=1e-5,regularizer='L2')

#15, 20
  x=tflearn.layers.conv.conv_2d(x,128,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
  x=tflearn.layers.conv.conv_2d(x,128,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
  x3 = x
  x=tflearn.layers.conv.conv_2d(x,256,(5,5),strides=2,activation='relu',weight_decay=1e-5,regularizer='L2')

#8, 10
  x=tflearn.layers.conv.conv_2d(x,256,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
  x=tflearn.layers.conv.conv_2d(x,256,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
  x4 = x
  x=tflearn.layers.conv.conv_2d(x,512,(5,5),strides=2,activation='relu',weight_decay=1e-5,regularizer='L2')

#4, 5
  x=tflearn.layers.conv.conv_2d(x,512,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
  x=tflearn.layers.conv.conv_2d(x,512,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
  x=tflearn.layers.conv.conv_2d_transpose(x,256,[5,5],[8,10],strides=2,activation='linear',weight_decay=1e-5,regularizer='L2')


#8,10 
  x4=tflearn.layers.conv.conv_2d(x4,256,(3,3),strides=1,activation='linear',weight_decay=1e-5,regularizer='L2')
  x = tf.nn.relu(tf.add(x,x4))
  x=tflearn.layers.conv.conv_2d(x,512,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')  
  x=tflearn.layers.conv.conv_2d(x,256,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
  x=tflearn.layers.conv.conv_2d_transpose(x,128,[5,5],[15,20],strides=2,activation='linear',weight_decay=1e-5,regularizer='L2')


#15,20
  x3=tflearn.layers.conv.conv_2d(x3,128,(3,3),strides=1,activation='linear',weight_decay=1e-5,regularizer='L2')
  x = tf.nn.relu(tf.add(x,x3))
  x=tflearn.layers.conv.conv_2d(x,128,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
  x=tflearn.layers.conv.conv_2d(x,128,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')  
  x=tflearn.layers.conv.conv_2d_transpose(x,64,[5,5],[30,40],strides=2,activation='linear',weight_decay=1e-5,regularizer='L2')

#30,40
  x2=tflearn.layers.conv.conv_2d(x2,64,(3,3),strides=1,activation='linear',weight_decay=1e-5,regularizer='L2')
  x = tf.nn.relu(tf.add(x,x2))
  x=tflearn.layers.conv.conv_2d(x,128,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2') 
  x=tflearn.layers.conv.conv_2d(x,64,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
  x=tflearn.layers.conv.conv_2d_transpose(x,64,[5,5],[60,80],strides=2,activation='linear',weight_decay=1e-5,regularizer='L2')

#60,80
  x=tflearn.layers.conv.conv_2d(x,64,(3,3),strides=1,activation='linear',weight_decay=1e-5,regularizer='L2')
  x=tflearn.layers.conv.conv_2d(x,64,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
  x=tflearn.layers.conv.conv_2d(x,64,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
  x=tflearn.layers.conv.conv_2d_transpose(x,64,[5,5],[120,160],strides=2,activation='linear',weight_decay=1e-5,regularizer='L2')

#120,160
  x=tflearn.layers.conv.conv_2d(x,64,(3,3),strides=1,activation='linear',weight_decay=1e-5,regularizer='L2')  
  x=tflearn.layers.conv.conv_2d(x,64,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
  x=tflearn.layers.conv.conv_2d(x,64,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
  x=tflearn.layers.conv.conv_2d_transpose(x,64,[5,5],[240,320],strides=2,activation='linear',weight_decay=1e-5,regularizer='L2')

#240,320
  x=tflearn.layers.conv.conv_2d(x,64,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')  
 
#### success
  x_s = tflearn.layers.conv.conv_2d(x,64,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')

  pred_frame2_xyz =tflearn.layers.conv.conv_2d(x_s,3,(3,3),strides=1,activation='linear',weight_decay=1e-3,regularizer='L2')
  pred_frame2_r = tflearn.layers.conv.conv_2d(x_s,1,(3,3),strides=1,activation='linear',weight_decay=1e-3,regularizer='L2')
  pred_frame2_mask = tflearn.layers.conv.conv_2d(x_s,2,(3,3),strides=1,activation='linear',weight_decay=1e-3,regularizer='L2')
  pred_frame2_score = tflearn.layers.conv.conv_2d(x_s,2,(3,3),strides=1,activation='linear',weight_decay=1e-3,regularizer='L2')

  pred_frame2_xyz = tf.add(pred_frame2_xyz,frame2_xyz)
 
  x_transl = tflearn.layers.conv.conv_2d(x_s,3,(3,3),strides=1,activation='linear',weight_decay=1e-3,regularizer='L2')
  x_rot = tflearn.layers.conv.conv_2d(x_s,3,(3,3),strides=1,activation='linear',weight_decay=1e-3,regularizer='L2')

  return pred_frame2_mask, pred_frame2_r, pred_frame2_xyz, pred_frame2_score, x_transl, x_rot
Example #27
File: net.py Project: vitionxp/iResNet-1
def net(left, right):
    with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
                        activation_fn=nn.relu,
                        padding='SAME'):
        with tf.name_scope('Multi-Share'):
            conv1a = slim.conv2d(left,
                                 64, [7, 7],
                                 stride=2,
                                 scope='conv1',
                                 reuse=False)
            conv1b = slim.conv2d(right,
                                 64, [7, 7],
                                 stride=2,
                                 scope='conv1',
                                 reuse=True)

            up_1a = slim.conv2d_transpose(conv1a,
                                          32, [4, 4],
                                          stride=2,
                                          scope='up_1',
                                          reuse=False)
            up_1b = slim.conv2d_transpose(conv1b,
                                          32, [4, 4],
                                          stride=2,
                                          scope='up_1',
                                          reuse=True)

            conv2a = slim.conv2d(conv1a,
                                 128, [5, 5],
                                 stride=2,
                                 scope='conv2',
                                 reuse=False)
            conv2b = slim.conv2d(conv1b,
                                 128, [5, 5],
                                 stride=2,
                                 scope='conv2',
                                 reuse=True)

            up_2a = slim.conv2d_transpose(conv2a,
                                          32, [8, 8],
                                          stride=4,
                                          scope='up_2',
                                          reuse=False)
            up_2b = slim.conv2d_transpose(conv2b,
                                          32, [8, 8],
                                          stride=4,
                                          scope='up_2',
                                          reuse=True)

            up_1a2a_concat = tf.concat([up_1a, up_2a], 3)
            up_1b2b_concat = tf.concat([up_1b, up_2b], 3)
            up_1a2a = slim.conv2d(up_1a2a_concat,
                                  32, [1, 1],
                                  scope='up_12_concat',
                                  reuse=False)
            up_1b2b = slim.conv2d(up_1b2b_concat,
                                  32, [1, 1],
                                  scope='up_12_concat',
                                  reuse=True)

        with tf.name_scope('DES-net'):
            corr1d = correlation.correlation(conv2a, conv2b, 1, 8, 1, 2, 8)
            conv_redir = slim.conv2d(conv2a, 64, [1, 1])
            corr1d_redir_concat = tf.concat([corr1d, conv_redir], 3)
            conv3 = slim.conv2d(corr1d_redir_concat, 256, [3, 3], stride=2)
            conv3_1 = slim.conv2d(conv3, 256, [3, 3])

            conv4 = slim.conv2d(conv3_1, 512, [3, 3], stride=2)
            conv4_1 = slim.conv2d(conv4, 512, [3, 3])

            conv5 = slim.conv2d(conv4_1, 512, [3, 3], stride=2)
            conv5_1 = slim.conv2d(conv5, 512, [3, 3])

            conv6 = slim.conv2d(conv5_1, 1024, [3, 3], stride=2)
            conv6_1 = slim.conv2d(conv6, 1024, [3, 3])

            disp6 = slim.conv2d(conv6_1, 1, [3, 3], activation_fn=None)
            resized_6 = tf.image.resize_images(disp6, [
                int(math.ceil(IMAGE_HIGHT / 32.)),
                int(math.ceil(IMAGE_WITCH / 32.))
            ])

            up_conv5 = slim.conv2d_transpose(conv6_1, 512, [4, 4], stride=2)
            iconv5_concat = tf.concat([up_conv5, resized_6, conv5_1], 3)
            iconv5 = slim.conv2d(iconv5_concat, 512, [3, 3])

            disp5 = slim.conv2d(iconv5, 1, [3, 3], activation_fn=None)
            resized_5 = tf.image.resize_images(disp5, [
                int(math.ceil(IMAGE_HIGHT / 16.)),
                int(math.ceil(IMAGE_WITCH / 16.))
            ])

            up_conv4 = slim.conv2d_transpose(iconv5, 256, [4, 4], stride=2)
            iconv4_concat = tf.concat([up_conv4, resized_5, conv4_1], 3)
            iconv4 = slim.conv2d(iconv4_concat, 256, [3, 3])

            disp4 = slim.conv2d(iconv4, 1, [3, 3], activation_fn=None)
            resized_4 = tf.image.resize_images(disp4, [
                int(math.ceil(IMAGE_HIGHT / 8.)),
                int(math.ceil(IMAGE_WITCH / 8.))
            ])

            up_conv3 = slim.conv2d_transpose(iconv4, 128, [4, 4], stride=2)
            iconv3_concat = tf.concat([up_conv3, resized_4, conv3_1], 3)
            iconv3 = slim.conv2d(iconv3_concat, 128, [3, 3])

            disp3 = slim.conv2d(iconv3, 1, [3, 3], activation_fn=None)
            resized_3 = tf.image.resize_images(disp3, [
                int(math.ceil(IMAGE_HIGHT / 4.)),
                int(math.ceil(IMAGE_WITCH / 4.))
            ])

            up_conv2 = slim.conv2d_transpose(iconv3, 64, [4, 4], stride=2)
            iconv2_concat = tf.concat([up_conv2, resized_3, conv2a], 3)
            iconv2 = slim.conv2d(iconv2_concat, 64, [3, 3])

            disp2 = slim.conv2d(iconv2, 1, [3, 3], activation_fn=None)
            resized_2 = tf.image.resize_images(disp2, [
                int(math.ceil(IMAGE_HIGHT / 2.)),
                int(math.ceil(IMAGE_WITCH / 2.))
            ])

            up_conv1 = slim.conv2d_transpose(iconv2, 32, [4, 4], stride=2)
            iconv1_concat = tf.concat([up_conv1, resized_2, conv1a], 3)
            iconv1 = slim.conv2d(iconv1_concat, 32, [3, 3])

            disp1 = slim.conv2d(iconv1, 1, [3, 3], activation_fn=None)
            resized_1 = tf.image.resize_images(disp1,
                                               [IMAGE_HIGHT, IMAGE_WITCH])

            up_conv0 = slim.conv2d_transpose(iconv1, 32, [4, 4], stride=2)
            iconv0_concat = tf.concat([up_conv0, resized_1, up_1a2a], 3)
            iconv0 = slim.conv2d(iconv0_concat, 32, [3, 3])

            disp0 = slim.conv2d(iconv0, 1, [3, 3], activation_fn=None)

        with tf.name_scope('DRS-net'):
            r_conv0_concat = tf.concat(
                [abs(up_1a2a - up_1b2b), disp0, up_1a2a], 3)
            r_conv0 = slim.conv2d(r_conv0_concat, 32, [3, 3])

            r_conv1 = slim.conv2d(r_conv0, 64, [3, 3], stride=2)

            c_conv1a = slim.conv2d(conv1a,
                                   16, [3, 3],
                                   scope='c_conv1',
                                   reuse=False)
            c_conv1b = slim.conv2d(conv1b,
                                   16, [3, 3],
                                   scope='c_conv1',
                                   reuse=True)

            r_corr = correlation.correlation(c_conv1a, c_conv1b, 1, 4, 1, 2, 4)

            r_conv1_1_concat = tf.concat([r_conv1, r_corr], 3)
            r_conv1_1 = slim.conv2d(r_conv1_1_concat, 64, [3, 3])

            r_conv2 = slim.conv2d(r_conv1_1, 128, [3, 3], stride=2)
            r_conv2_1 = slim.conv2d(r_conv2, 128, [3, 3])

            r_res2 = slim.conv2d(r_conv2_1, 1, [3, 3], activation_fn=None)
            r_res2_resize = tf.image.resize_images(r_res2, [
                int(math.ceil(IMAGE_HIGHT / 2)),
                int(math.ceil(IMAGE_WITCH / 2))
            ])

            r_upconv1 = slim.conv2d_transpose(r_conv2_1, 64, [4, 4], stride=2)
            r_iconv1_concat = tf.concat([r_upconv1, r_res2_resize, r_conv1_1],
                                        3)
            r_iconv1 = slim.conv2d(r_iconv1_concat, 64, [3, 3])

            r_res1 = slim.conv2d(r_iconv1, 1, [3, 3], activation_fn=None)
            r_res1_resize = tf.image.resize_images(r_res1,
                                                   [IMAGE_HIGHT, IMAGE_WITCH])

            r_upconv0 = slim.conv2d_transpose(r_conv1, 32, [4, 4], stride=2)
            r_iconv0_concat = tf.concat([r_upconv0, r_res1_resize, r_conv0], 3)
            r_iconv0 = slim.conv2d(r_iconv0_concat, 32, [3, 3])

            r_res0 = slim.conv2d(r_iconv0, 1, [3, 3], activation_fn=None)

        return {
            'disp6': disp6,
            'disp5': disp5,
            'disp4': disp4,
            'disp3': disp3,
            'disp2': disp2,
            'disp1': disp1,
            'disp0': disp0 + r_res0
        }
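
Note: the DES-net branch builds its matching cost with the FlowNet-style correlation op, correlation.correlation(conv2a, conv2b, 1, 8, 1, 2, 8); the positional arguments are presumably kernel size, maximum displacement, and the strides/padding of the displacement search. A naive NumPy sketch of a horizontal (1-D, stereo-style) correlation cost volume, ours rather than the project's CUDA kernel:

import numpy as np

def correlation_1d(left, right, max_disp=8):
    # left, right: (H, W, C) feature maps; output channel k holds the
    # mean feature dot product at horizontal displacement d = k - max_disp
    H, W, C = left.shape
    padded = np.pad(right, ((0, 0), (max_disp, max_disp), (0, 0)))
    out = np.empty((H, W, 2 * max_disp + 1), dtype=left.dtype)
    for k, d in enumerate(range(-max_disp, max_disp + 1)):
        out[:, :, k] = (left * padded[:, max_disp + d:max_disp + d + W]).mean(-1)
    return out

l = np.random.rand(12, 16, 8).astype(np.float32)
r = np.random.rand(12, 16, 8).astype(np.float32)
print(correlation_1d(l, r).shape)  # (12, 16, 17)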
Example #28
0
 def testEq(self):
     corr = correlation(rangen=10)
     self.assertEqual(corr.rangen, 10)
     self.assertEqual(len(corr.label), 4)
Example #29
0
#!/usr/bin/python
# -*- coding: utf-8 -*-

import os

rootdir = "E:/bda-pylib/statistics/correlationModel"
os.chdir(rootdir)

from correlation import correlation

if __name__ == '__main__':
    col = "V0,V1,V2,V3"
    input_file = "data/iris.csv"
    method = 'pearson'  # 'pearson', 'kendall', 'spearman'
    correlation(col, input_file, method)
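
Note: this script drives a correlationModel wrapper; the same result can be had directly from pandas, whose DataFrame.corr accepts the identical three method names. A minimal equivalent, assuming data/iris.csv really contains columns V0..V3:

import pandas as pd

df = pd.read_csv("data/iris.csv")
print(df[["V0", "V1", "V2", "V3"]].corr(method="pearson"))  # or 'kendall', 'spearman'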
Example #30
0
 def testMerge(self):
     corr = correlation(rangen=13)
     self.assertEqual(len(weather),
                      len(corr.weather_collision_merge(weather, data)))
Example #31
0
 def testinit(self):
     corr = correlation(rangen=5, label='A')
     self.assertEqual(corr.rangen, 5)
     self.assertEqual(corr.label, 'A')
Example #32
0
def cnnmodel(frame1_xyz,frame1_rgb,frame2_xyz,frame2_rgb):
  frame1_feat_rgb,_ = get_network('resnet50',frame1_rgb,weight_decay=1e-5, is_training=True)
  frame2_feat_rgb,_ = get_network('resnet50',frame2_rgb,weight_decay=1e-5, is_training=True, reuse=True)
  
  frame1_feat = encoder(frame1_xyz)
  frame2_feat = encoder(frame2_xyz,reuse=True)
  
  cc_o = correlation(frame2_feat_rgb,frame1_feat_rgb,1,rad,1,1,rad)
  cc = tf.reshape(cc_o,[-1, 30*40, dia * dia, 1])
  cc_weight = tf.nn.relu(cc)

  frame1_feat_o = frame1_feat 
  frame1_feat = tf.transpose(frame1_feat,[0,3,1,2])
  frame1_feat_padded = tf.pad(frame1_feat,paddings=[[0,0],[0,0],[rad,rad],[rad,rad]])

  frame1_list = []
  for i in range(30):
    for j in range(40):
      tmp = frame1_feat_padded[:,:,0+i:2*rad+1+i,0+j:2*rad+1+j]
      tmp = tf.reshape(tmp,[-1,64,dia * dia])
      frame1_list.append(tmp)
  frame1_list = tf.stack(frame1_list,axis=2)
  frame1_list = tf.transpose(frame1_list,[0,2,3,1])

  frame1_list = frame1_list * cc_weight

  frame1_list = tf.nn.max_pool(frame1_list,ksize=[1,1,dia * dia,1],strides=[1,1,dia * dia,1],padding='VALID')
  frame1_list = tf.reshape(frame1_list,(-1,30,40,64))

  x = tf.concat([frame2_feat,frame1_feat_o,frame1_list],3)

  x_s = decoder(x)
  x_transl = tflearn.layers.conv.conv_2d(x_s,3,(3,3),strides=1,activation='linear',weight_decay=1e-3,regularizer='L2')
  rot = tflearn.layers.conv.conv_2d(x_s,3,(3,3),strides=1,activation='linear',weight_decay=1e-3,regularizer='L2')
   
  x_center = tflearn.layers.conv.conv_2d(x_s,3,(3,3),strides=1,activation='linear',weight_decay=1e-3,regularizer='L2')
  x_score = tflearn.layers.conv.conv_2d(x_s,2,(3,3),strides=1,activation='linear',weight_decay=1e-3,regularizer='L2')
  x_mask = tflearn.layers.conv.conv_2d(x_s,2,(3,3),strides=1,activation='linear',weight_decay=1e-3,regularizer='L2')
  x_boundary = tflearn.layers.conv.conv_2d(x_s,1,(3,3),strides=1,activation='linear',weight_decay=1e-3,regularizer='L2')
 
  x_center = tf.add(x_center,frame2_xyz)
  frame2_xyz_ = frame2_xyz - x_center 

  angle = tf.norm(rot,axis=3)
  angle_ = tf.expand_dims(angle,-1)
  axis = rot / (angle_ + 0.000001)
  c = tf.cos(angle)
  v = 1 - c
  s = tf.sin(angle)
  r00 = axis[:,:,:,0] * axis[:,:,:,0] * v  + c
  r01 = axis[:,:,:,0] * axis[:,:,:,1] * v - axis[:,:,:,2] * s
  r02 = axis[:,:,:,0] * axis[:,:,:,2] * v + axis[:,:,:,1] * s
  r10 = axis[:,:,:,0] * axis[:,:,:,1] * v + axis[:,:,:,2] * s
  r11 = axis[:,:,:,1] * axis[:,:,:,1] * v + c
  r12 = axis[:,:,:,1] * axis[:,:,:,2] * v - axis[:,:,:,0] * s
  r20 = axis[:,:,:,0] * axis[:,:,:,2] * v - axis[:,:,:,1] * s
  r21 = axis[:,:,:,1] * axis[:,:,:,2] * v + axis[:,:,:,0] * s
  r22 = axis[:,:,:,2] * axis[:,:,:,2] * v + c

  x = r00 * frame2_xyz_[:,:,:,0] + r01 * frame2_xyz_[:,:,:,1] + r02 * frame2_xyz_[:,:,:,2]
  y = r10 * frame2_xyz_[:,:,:,0] + r11 * frame2_xyz_[:,:,:,1] + r12 * frame2_xyz_[:,:,:,2]
  z = r20 * frame2_xyz_[:,:,:,0] + r21 * frame2_xyz_[:,:,:,1] + r22 * frame2_xyz_[:,:,:,2]

  x_flow = tf.stack((x,y,z),axis=-1)
  x_flow = x_flow + x_transl + x_center - frame2_xyz
  x_center_p = x_center + x_transl
  x_traj = tf.concat([x_center,x_center_p],3)

  return rot, x_transl, x_traj, x_flow, x_center, x_mask, x_score, x_boundary
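
Note: the r00..r22 terms in this example spell out the Rodrigues rotation matrix R = c*I + v*a*a^T + s*[a]_x elementwise, where a is the unit rotation axis, c = cos(theta), s = sin(theta), and v = 1 - c. A NumPy sanity sketch of the same matrix (ours, for illustration only):

import numpy as np

def rodrigues(axis, theta):
    a = axis / np.linalg.norm(axis)        # unit axis, as rot / |rot|
    c, s = np.cos(theta), np.sin(theta)
    v = 1.0 - c
    skew = np.array([[0.0, -a[2], a[1]],   # [a]_x, the cross-product matrix
                     [a[2], 0.0, -a[0]],
                     [-a[1], a[0], 0.0]])
    return c * np.eye(3) + v * np.outer(a, a) + s * skew

R = rodrigues(np.array([1.0, 2.0, 2.0]), 0.7)
print(R @ np.array([0.3, -0.1, 0.5]))  # rotated point, matching the r00..r22 expansion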
Example #33
0
    ind = np.arange(len(actions))
    width = 0.35
    names = ["ML", "MR", "MU", "MD", "ZI", "ZO", "TF", "TB", "REC", "REAN", "ROC", "ROAN"]

    fig = plt.figure()
    ax  = fig.add_subplot(111)
    rect1 = ax.bar(ind, prob_general_1, width, facecolor = 'none', hatch = '//', label="Real")
    rect2 = ax.bar(ind+width, prob_general_2, width, facecolor='none', hatch='\\\\', label="Synthetic")
    ax.axis(ymin=0, ymax=0.5)
    ax.set_ylabel('Probability of Action')
    ax.set_xlabel('Action')
    ax.set_xticks(ind+width)
    ax.set_xticklabels(names, fontsize='small')
    ax.legend(loc="upper right")
    ax.text(5, 0.45, "correlation: %0.3f"%correlation(prob_general_1, prob_general_2), horizontalalignment='center')
    #autolabel(ax, rect1)
    #autolabel(ax, rect2)
    plt.savefig("general_prob_comp.eps")
    plt.close()

    prob_continue_1 = []
    prob_continue_2 = []
    prob_continue_1.append(m_continue_1["continue"] * 1.0 / m_continue_1["total"])
    prob_continue_2.append(m_continue_2["continue"] * 1.0 / m_continue_2["total"])
    for action in actions:
        prob_continue_1.append(m_continue_1[action] * 1.0 / m_general_1[action])
        prob_continue_2.append(m_continue_2[action] * 1.0 / m_general_2[action])
    
    ind = np.arange(len(actions)+1)
    fig = plt.figure()
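
Note: this plotting snippet (and the next one) annotates its bar chart with correlation(prob_general_1, prob_general_2); given the context, this is presumably a plain Pearson coefficient between the two probability vectors, which np.corrcoef reproduces:

import numpy as np

def pearson(a, b):
    # Pearson r between two equal-length vectors
    return np.corrcoef(a, b)[0, 1]

print("correlation: %0.3f" % pearson([0.1, 0.2, 0.4], [0.12, 0.18, 0.35]))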
Example #34
0
    rects2 = ax.bar(ind+width, prop2_list, width, facecolor='none', hatch='\\\\', label='Synthetic')

    # add some labels, ticks, and a legend
    ax.set_ylabel('Probability')
    ax.set_xlabel(axis.upper())
    #ax.set_title('Popularity of ' + axis)
    ax.set_xticks(ind+width)
    xtick_lst = []
    for v in v_list:
        if v % 2 == 0:
            xtick_lst.append(v)
        else:
            xtick_lst.append("")
    ax.set_xticklabels(xtick_lst)
    ax.axis(ymin=0, ymax=1)
    ax.text(N/2 - 1, 0.9, "correlation: %0.3f"%correlation(prop1_list, prop2_list), horizontalalignment='center')

    #ax.legend( (rects1[0], rects2[0]), ('Original', 'Generated') )
    ax.legend()

    def autolabel(rects):
        # attach some text labels
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%0.2f'%height, ha='center', va='bottom')

    #autolabel(rects1)
    #autolabel(rects2)
    plt.savefig(axis+".eps")

def cnnmodel(frame1_xyz, frame1_rgb, frame2_xyz, frame2_rgb):
    frame1_feat_rgb, _ = get_network('resnet50',
                                     frame1_rgb,
                                     weight_decay=1e-5,
                                     is_training=True)
    frame2_feat_rgb, _ = get_network('resnet50',
                                     frame2_rgb,
                                     weight_decay=1e-5,
                                     is_training=True,
                                     reuse=True)

    frame1_feat = encoder(frame1_xyz)
    frame2_feat = encoder(frame2_xyz, reuse=True)

    cc_o = correlation(frame2_feat_rgb, frame1_feat_rgb, 1, rad, 1, 1, rad)
    cc = tf.reshape(cc_o, [-1, 30 * 40, dia * dia, 1])
    cc_weight = tf.nn.relu(cc)

    frame1_feat_o = frame1_feat
    frame1_feat = tf.transpose(frame1_feat, [0, 3, 1, 2])
    frame1_feat_padded = tf.pad(frame1_feat,
                                paddings=[[0, 0], [0, 0], [rad, rad],
                                          [rad, rad]])
    frame1_list = []
    for i in range(30):
        for j in range(40):
            tmp = frame1_feat_padded[:, :, 0 + i:2 * rad + 1 + i,
                                     0 + j:2 * rad + 1 + j]
            tmp = tf.reshape(tmp, [-1, 64, dia * dia])
            frame1_list.append(tmp)
    frame1_list = tf.stack(frame1_list, axis=2)
    frame1_list = tf.transpose(frame1_list, [0, 2, 3, 1])

    frame1_list = frame1_list * cc_weight

    frame1_list = tf.nn.max_pool(frame1_list,
                                 ksize=[1, 1, dia * dia, 1],
                                 strides=[1, 1, dia * dia, 1],
                                 padding='VALID')
    frame1_list = tf.reshape(frame1_list, (-1, 30, 40, 64))

    x = tf.concat([frame2_feat, frame1_feat_o, frame1_list], 3)

    x_s = decoder(x)
    x_transl = tflearn.layers.conv.conv_2d(x_s,
                                           3, (3, 3),
                                           strides=1,
                                           activation='linear',
                                           weight_decay=1e-3,
                                           regularizer='L2')
    rot_quaternion = tflearn.layers.conv.conv_2d(x_s,
                                                 4, (3, 3),
                                                 strides=1,
                                                 activation='linear',
                                                 weight_decay=1e-3,
                                                 regularizer='L2')

    ### quaternion normalize
    quaternion_norm = tf.norm(rot_quaternion, axis=3) * tf.sign(
        rot_quaternion[:, :, :, 0])
    quaternion_norm = tf.expand_dims(quaternion_norm, -1)
    x_quaternion = rot_quaternion / (quaternion_norm + 0.000001)

    w1, x1, y1, z1 = tf.unstack(x_quaternion, axis=-1)
    x2, y2, z2 = tf.unstack(frame2_xyz, axis=-1)

    wm = -x1 * x2 - y1 * y2 - z1 * z2
    xm = w1 * x2 + y1 * z2 - z1 * y2
    ym = w1 * y2 + z1 * x2 - x1 * z2
    zm = w1 * z2 + x1 * y2 - y1 * x2

    x = -wm * x1 + xm * w1 - ym * z1 + zm * y1
    y = -wm * y1 + ym * w1 - zm * x1 + xm * z1
    z = -wm * z1 + zm * w1 - xm * y1 + ym * x1

    x_flow = tf.stack((x, y, z), axis=-1)
    x_flow = x_flow + x_transl - frame2_xyz

    x_center = tflearn.layers.conv.conv_2d(x_s,
                                           3, (3, 3),
                                           strides=1,
                                           activation='linear',
                                           weight_decay=1e-3,
                                           regularizer='L2')
    x_score = tflearn.layers.conv.conv_2d(x_s,
                                          2, (3, 3),
                                          strides=1,
                                          activation='linear',
                                          weight_decay=1e-3,
                                          regularizer='L2')
    x_mask = tflearn.layers.conv.conv_2d(x_s,
                                         2, (3, 3),
                                         strides=1,
                                         activation='linear',
                                         weight_decay=1e-3,
                                         regularizer='L2')
    x_boundary = tflearn.layers.conv.conv_2d(x_s,
                                             2, (3, 3),
                                             strides=1,
                                             activation='linear',
                                             weight_decay=1e-3,
                                             regularizer='L2')

    x_center = tf.add(x_center, frame2_xyz)
    xc, yc, zc = tf.unstack(x_center, axis=-1)

    wmc = -x1 * xc - y1 * yc - z1 * zc
    xmc = w1 * xc + y1 * zc - z1 * yc
    ymc = w1 * yc + z1 * xc - x1 * zc
    zmc = w1 * zc + x1 * yc - y1 * xc

    xc = -wmc * x1 + xmc * w1 - ymc * z1 + zmc * y1
    yc = -wmc * y1 + ymc * w1 - zmc * x1 + xmc * z1
    zc = -wmc * z1 + zmc * w1 - xmc * y1 + ymc * x1

    x_center_p = tf.stack((xc, yc, zc), axis=-1) + x_transl
    x_traj = tf.concat([x_center, x_center_p], 3)

    return x_quaternion, x_transl, x_traj, x_flow, x_center, x_mask, x_score, x_boundary
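
Note: the wm/xm/ym/zm and x/y/z blocks above hand-expand the quaternion sandwich product p' = q (0, p) q^-1, which rotates point p by the unit quaternion q = (w1, x1, y1, z1). A compact NumPy check of the same expansion (ours, for illustration):

import numpy as np

def quat_mul(a, b):
    # Hamilton product of quaternions (w, x, y, z)
    w1, x1, y1, z1 = a
    w2, x2, y2, z2 = b
    return np.array([w1*w2 - x1*x2 - y1*y2 - z1*z2,
                     w1*x2 + x1*w2 + y1*z2 - z1*y2,
                     w1*y2 + y1*w2 + z1*x2 - x1*z2,
                     w1*z2 + z1*w2 + x1*y2 - y1*x2])

def quat_rotate(q, p):
    q = q / np.linalg.norm(q)                    # normalize, as the graph does
    q_conj = q * np.array([1.0, -1.0, -1.0, -1.0])
    return quat_mul(quat_mul(q, np.r_[0.0, p]), q_conj)[1:]

print(quat_rotate(np.array([0.9, 0.1, 0.2, 0.3]), np.array([1.0, 0.0, 0.0])))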
Example #36
0
    def call(self, left, right, training=None):
        c1a = self.conv1a(left)
        p1a = self.pool1a(c1a)
        c3a = self.conv3a(p1a)
        p3a = self.pool3a(c3a)
        c17a = self.conv17a(p3a)
        p8a = self.pool8a(c17a)
        c1b = self.conv1b(right)
        p1b = self.pool1b(c1b)
        c3b = self.conv3b(p1b)
        p3b = self.pool3b(c3b)
        c17b = self.conv17b(p3b)
        p8b = self.pool8b(c17b)

        #         c = tf.concat([p8a, p8b],axis = 3)
        #         cc = self.corr(c)

        cc = correlation(p8a, p8b)
        cc = tf.nn.leaky_relu(cc, 0.1)
        ca = self.conva(p8a)
        net = tf.concat([ca, cc], axis=3)

        c4 = self.conv4(net)
        c9 = self.conv9(c4)
        p5 = self.pool5(c9)
        c10 = self.conv10(p5)
        c11 = self.conv11(c10)
        p6 = self.pool6(c11)
        c12 = self.conv12(p6)
        c13 = self.conv13(c12)
        p7 = self.pool7(c13)
        c14 = self.conv14(p7)
        c18 = self.conv18(c14)
        u1 = self.up1(c18)
        d4 = self.deconv4(c14)
        b1 = self.bn1(d4)
        merge_2 = tf.concat([c12, b1, u1], axis=3)
        c19 = self.conv19(merge_2)
        c20 = self.conv20(c19)
        u2 = self.up2(c20)
        d5 = self.deconv5(c19)
        b2 = self.bn2(d5)
        merge_3 = tf.concat([c10, b2, u2], axis=3)
        c21 = self.conv21(merge_3)
        d24 = self.deconv24(c21)
        b3 = self.bn3(d24)
        c22 = self.conv22(c21)
        u3 = self.up3(c22)
        merge_4 = tf.concat([c4, b3, u3], axis=3)
        c23 = self.conv23(merge_4)
        c24 = self.conv24(c23)
        u4 = self.up4(c24)
        d7 = self.deconv7(c23)
        b4 = self.bn4(d7)
        #         print(p3a.shape,b4.shape,u4.shape)
        merge_5 = layers.concatenate([p3a, b4, u4],
                                     axis=3)  #([p3b,b4,u4],axis = 3)
        c25 = self.conv25(merge_5)
        c26 = self.conv26(c25)
        u5 = self.up5(c26)
        d8 = self.deconv8(c25)
        b5 = self.bn5(d8)
        merge_6 = tf.concat([p1a, b5, u5], axis=3)  #([p1b,b5,u5],axis = 3)
        c27 = self.conv27(merge_6)
        out = self.conv28(c27)
        return out, c26, c24, c22, c20
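
Note: this call() follows the same correlate-then-decode recipe as the slim example above: pyramid features from both views, a coarsest-level cost volume (correlation(p8a, p8b)) activated with leaky ReLU, and a 1x1 "redirect" convolution of the left features concatenated in before the U-Net-style decoder. A hedged Keras sketch of that fusion step (layer parameters are ours, not the model's):

import tensorflow as tf

def fuse_cost_volume(left_feat, cost_volume):
    # leaky-ReLU the raw correlation scores, then concatenate a 1x1
    # projection of the left features as the "redirect" path
    cc = tf.nn.leaky_relu(cost_volume, 0.1)
    redir = tf.keras.layers.Conv2D(32, 1, padding="same")(left_feat)
    return tf.concat([redir, cc], axis=3)

left = tf.random.normal([1, 8, 10, 64])
cost = tf.random.normal([1, 8, 10, 81])
print(fuse_cost_volume(left, cost).shape)  # (1, 8, 10, 113)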
Example #37
0
File: index.py Project: MonMon201/IES1.2
n = 12
W = 2400
N = 1024
time = range(N)
x = rsg.getRandomSignal(n, W, N)
y = rsg.getRandomSignal(n, W, N)

list_N, list_T, list_avg = at.getAvgTime(50, n, W)
dict_N, dict_T, dict_avg = at.getAvgTime(50, n, W, True)

print('time for list - ' + str(list_avg) + ', time for dict - ' +
      str(dict_avg))

autocorrelation = corr.selfcorrelation(x)
correlation = corr.correlation(x, y)

corrR = list(range(int(N / 2)))

fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, 1)
plt.subplots_adjust(left=0.05, bottom=0.1, right=0.97, wspace=0.1)
fig.suptitle('Lab 1.2')

ax1.plot(x, color='r', label='s 1')
ax1.plot(y, color='g', label='s 2')
ax1.set_title('Generated signals')
ax1.set(xlabel='time', ylabel='generated signal')
ax1.legend()

ax2.plot(corrR, autocorrelation, color='r')
ax2.set_title('Autocorrelation (s 1)')