Code example #1
def ellipses(sizes, correlations, mean, variance):
    correlation = Correlation()
    for size in sizes:
        for rho in correlations:
            x, y = correlation.multivariate_normal(mean, variance, rho, size)
            ellipse = Ellipse(x, y, size, rho)
            ellipse.plot()
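
A minimal invocation sketch; the argument values below are assumptions for illustration, not from the source:

# Hypothetical driver call: standard bivariate normal, three sample sizes,
# three correlation levels.
ellipses(sizes=[20, 60, 100], correlations=[0.0, 0.5, 0.9], mean=0, variance=1)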
Code example #2
def print_correlations(samples, size, rho):
    # Print a header line, then one table row per statistic (mean, mean
    # square, variance), with one '&'-separated LaTeX cell per sample.
    print('size = ' + str(size) + ', rho = ' + str(rho))
    for sample in samples:
        print(' $ ' + str(round(Correlation.mean(sample), 4)) + ' $ ', end='&')
    print()
    for sample in samples:
        print(' $ ' + str(round(Correlation.square_mean(sample), 4)) + ' $ ',
              end='&')
    print()
    for sample in samples:
        print(' $ ' + str(round(Correlation.variance(sample), 4)) + ' $ ',
              end='&')
    print()
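
A call sketch with hypothetical sample data; after the header line, the three loops print one row each (means, mean squares, and variances of the samples) as '&'-separated LaTeX cells:

print_correlations([[0.49, 0.52], [0.47, 0.50], [0.31, 0.35]], size=20, rho=0.5)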
Code example #3
    def __init__(self,
                 ip_addr='localhost',
                 num_channels=4,
                 fs=800e6,
                 logger=logging.getLogger(__name__)):
        """The interface to a ROACH cross correlator

        Keyword arguments:
        ip_addr -- IP address (or hostname) of the ROACH. (default: localhost)
        num_channels -- antennas in the correlator. (default: 4)
        fs -- sample frequency of antennas. (default 800e6; 800 MHz)
        logger -- logger to use. (default: new default logger)
        """
        self.logger = logger
        self.fpga = corr.katcp_wrapper.FpgaClient(ip_addr)
        time.sleep(0.1)
        self.num_channels = num_channels
        self.cross_combinations = list(
            itertools.combinations(
                range(num_channels),
                2))  # [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
        self.auto_combinations = [(0, 0)]  # only 0x0 has been implemented
        self.correlations = {}
        for comb in (self.cross_combinations + self.auto_combinations):
            self.correlations[comb] = Correlation(comb, self.fpga)
            self.correlations[comb].fetch_signal(
                force=True)  # ensure populated with some data
        self.control_reg = ControlRegister(self.fpga,
                                           self.logger.getChild('control_reg'))
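
A hypothetical instantiation sketch; the class name RoachCorrelator is an assumption (the snippet does not show it), and only the keyword arguments come from the docstring above:

# Assumed class name; keyword defaults as documented in the docstring.
cc = RoachCorrelator(ip_addr='roach.local', num_channels=4, fs=800e6)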
Code example #4
    def __init__(self,
                 in_feature,
                 in_frame,
                 out_feature,
                 fusion_feature,
                 embedding_feature=128,
                 max_disp=4):
        super().__init__()
        self.fusion = DefConv2d(in_feature * in_frame, fusion_feature, 1)

        self.embedding = SimilarityEmbedding(in_feature, embedding_feature, 3)
        self.corr = Correlation(pad_size=max_disp,
                                kernel_size=1,
                                max_displacement=max_disp,
                                stride1=1,
                                stride2=1,
                                corr_multiply=1)
        n_corr_feature = (in_frame - 1) * (2 * max_disp + 1)**2

        n_feature_neck = n_corr_feature + fusion_feature
        self.shared_neck = nn.Sequential(
            DefConv2d(n_feature_neck, n_feature_neck, 3),
            DefConv2d(n_feature_neck, n_feature_neck, 3),
            DefConv2d(n_feature_neck, n_feature_neck, 3),
            DefConv2d(n_feature_neck, n_feature_neck, 3),
            DefConv2d(n_feature_neck, n_feature_neck, 3),
            DefConv2d(n_feature_neck, out_feature, 3))

        self.in_feature = in_feature
        self.in_frame = in_frame
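
As a worked example of the channel bookkeeping (in_frame = 2 is an assumed value; max_disp = 4 is the default): n_corr_feature = (2 - 1) * (2 * 4 + 1)**2 = 81, so the shared neck receives 81 + fusion_feature input channels.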
Code example #5
def ej2():
    costs = [11, 10, 14, 13, 12, 20, 21, 15, 22, 18, 19, 16]
    sales = [19, 15, 20, 14, 16, 33, 32, 18, 29, 22, 23, 20]
    correlation = Correlation(
        'Diagrama de dispersión de las ventas según los costos', {
            'xlabel': 'Costos',
            'ylabel': 'Ventas'
        })
    correlation.set_data(costs, sales)
    correlation.calc_corr_values_and_conclude()
    correlation.show_plot(25, 35)
Code example #6
    def __init__(self):
        '''Get the four layers' kernels, create the correlations and
        a convolution wrapper.
        :const MIN_IMG_WIDTH: Minimum image width for which to use
                              NumPy/SciPy for convolution
        '''
        self.kernels = DifferenceOfGaussians()
        self.correlations = Correlation(self.kernels.full_kernels)
        self.convolver = Convolution()
        self.MIN_IMG_WIDTH = 256
Code example #7
def ej5():
    math = [6, 4, 8, 5, 6, 7, 5, 10, 5, 4]
    music = [2, 5, 5, 6, 7, 6, 7, 9, 10, 10]
    correlation = Correlation(
        'Diagrama de dispersión \nde las notas de 10 alumnos en matemática y en música',
        {
            'xlabel': 'Notas de matemática',
            'ylabel': 'Notas de música'
        })
    correlation.set_data(math, music)
    correlation.calc_corr_values_and_conclude()
    correlation.show_plot(10, 10)
Code example #8
def ej3():
    people_with_high_pressure = [15, 13, 10, 27, 20, 5, 8, 31, 78, 22]
    over_weight = [75, 86, 88, 125, 75, 30, 47, 150, 114, 68]
    correlation = Correlation(
        'Diagrama de dispersión \nde cantidad de personas con presión arterial alta según el sobrepeso',
        {
            'xlabel': 'Cantidad de personas con presión alta',
            'ylabel': 'Sobrepeso'
        })
    correlation.set_data(people_with_high_pressure, over_weight)
    correlation.calc_corr_values_and_conclude()
    correlation.show_plot(100, 160, 20, 5)
Code example #9
def ej1():
    busy_people = [1, 2, 3, 4, 5]
    task_duration = [8, 7, 5, 5, 2]
    correlation = Correlation(
        'Diagrama de dispersión \nde duración de la tarea según el número de personas ocupadas',
        {
            'xlabel': 'Personas ocupadas',
            'ylabel': 'Duración de la tarea'
        })
    correlation.set_data(busy_people, task_duration)
    correlation.calc_corr_values_and_conclude()
    correlation.show_plot(6, 10)
Code example #10
def renderCorrelation():

    correlation = Correlation()

    if request.method == "GET":
        return render_template('associate.html')
    if request.method == "POST":
        correlationResults = correlation.correlateWines(
            request.form['wine_type'], 'quality',
            request.form['wine_characteristic'])
        characteristicValues = correlation.getCharacteristicValues(
            request.form['wine_type'])
        regressionLineValues = correlation.generateRegressionLine()
        return render_template(
            'association-graph.html',
            results=Markup(correlationResults),
            qualityValues=characteristicValues['quality'],
            otherValues=characteristicValues[
                request.form['wine_characteristic']],
            yAxisTitle=request.form['wine_characteristic'].title(),
            regressionValues=regressionLineValues)
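
The snippet omits the route registration; a minimal sketch assuming a Flask application object named app (the URL rule itself is likewise an assumption):

app.add_url_rule('/associate', view_func=renderCorrelation,
                 methods=['GET', 'POST'])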
Code example #11
    def __init__(self,
                 ip_addr='localhost',
                 num_channels=4,
                 fs=800e6,
                 logger=logging.getLogger(__name__)):
        """The interface to a ROACH cross correlator

        Keyword arguments:
        ip_addr -- IP address (or hostname) of the ROACH. (default: localhost)
        num_channels -- antennas in the correlator. (default: 4)
        fs -- sample frequency of antennas. (default 800e6; 800 MHz)
        logger -- logger to use. (default: new default logger)
        """
        self.logger = logger
        self.fpga = corr.katcp_wrapper.FpgaClient(ip_addr)
        time.sleep(0.1)
        self.num_channels = num_channels
        self.fs = np.float64(fs)
        self.cross_combinations = list(
            itertools.combinations(
                range(num_channels),
                2))  # [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
        self.control_register = ControlRegister(
            self.fpga, self.logger.getChild('control_reg'))
        self.set_accumulation_len(100)
        self.re_sync()
        # necessary as Correlations auto fetch signal
        self.control_register.allow_trigger()
        # only 0x0 has been implemented
        #self.auto_combinations = [(x, x) for x in range(num_channels)] # [(0, 0), (1, 1), (2, 2), (3, 3)]
        self.auto_combinations = [(0, 0)]
        self.frequency_correlations = {}
        for comb in (self.cross_combinations + self.auto_combinations):
            self.frequency_correlations[comb] = Correlation(
                fpga=self.fpga,
                comb=comb,
                f_start=0,
                f_stop=fs / 2,
                logger=self.logger.getChild("{a}x{b}".format(a=comb[0],
                                                             b=comb[1])))
        self.time_domain_snap = Snapshot(
            fpga=self.fpga,
            name='dram_snapshot',
            dtype=np.int8,
            cvalue=False,
            logger=self.logger.getChild('time_domain_snap'))
        self.upsample_factor = 100
        self.subsignal_length_max = 2**17
        self.time_domain_padding = 100
        self.time_domain_calibration_values = None
        self.time_domain_calibration_cable_values = None
        self.control_register.block_trigger()
Code example #12
    def __init__(self, args, in_ch):
        super(LiteFlowNetCorr, self).__init__()
        self.args = args
        self.corr = Correlation(pad_size=args.search_range,
                                kernel_size=1,
                                max_displacement=args.search_range,
                                stride1=1,
                                stride2=1,
                                corr_multiply=1).cuda()
        self.flow_estimator = OpticalFlowEstimatorCorr(in_ch +
                                                       (args.search_range * 2 +
                                                        1)**2)
        self.init_weights()
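
Worked channel arithmetic, assuming args.search_range = 4: the correlation layer yields (2 * 4 + 1)**2 = 81 channels, so OpticalFlowEstimatorCorr is built with in_ch + 81 input channels.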
Code example #13
def selective_correlation(sizes, n, correlations, mean, variance):
    correlation = Correlation()
    for size in sizes:
        for rho in correlations:
            correlation_pearson_sample = []
            correlation_square_sample = []
            correlation_spearman_sample = []
            for _ in range(0, n):
                x, y = correlation.multivariate_normal(mean, variance, rho,
                                                       size)
                correlation_pearson_sample.append(
                    Correlation.pearson_correlation(x, y))
                correlation_square_sample.append(
                    Correlation.square_correlation(x, y))
                correlation_spearman_sample.append(
                    Correlation.spearman_correlation(x, y))
            print_correlations([
                correlation_pearson_sample, correlation_spearman_sample,
                correlation_square_sample
            ], size, rho)
    for size in sizes:
        correlation_pearson_sample = []
        correlation_square_sample = []
        correlation_spearman_sample = []
        for _ in range(0, n):
            x, y = correlation.mixed_multivariate_normal(size)
            correlation_pearson_sample.append(
                Correlation.pearson_correlation(x, y))
            correlation_square_sample.append(
                Correlation.square_correlation(x, y))
            correlation_spearman_sample.append(
                Correlation.spearman_correlation(x, y))
        print_correlations([
            correlation_pearson_sample, correlation_spearman_sample,
            correlation_square_sample
        ], size, -1)
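
A hypothetical invocation (all values are assumptions for illustration):

selective_correlation(sizes=[20, 60, 100], n=1000,
                      correlations=[0.0, 0.5, 0.9], mean=0, variance=1)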
Code example #14
    def plot(self):
        correlation = Correlation()

        pearson = correlation.pearson_correlation(self.x, self.y)
        mean_x = Correlation.mean(self.x)
        mean_y = Correlation.mean(self.y)
        variance_x = np.sqrt(Correlation.variance(self.x))
        variance_y = np.sqrt(Correlation.variance(self.y))
        ell_radius_x = np.sqrt(1 + pearson)
        ell_radius_y = np.sqrt(1 - pearson)
        alpha = 1 / 2 * np.arctan(
            (2 * pearson * variance_x * variance_y) /
            (variance_x * variance_x - variance_y * variance_y))

        alpha = alpha if alpha > 0 else alpha + np.pi / 2

        scale_x = 3 * variance_x
        scale_y = 3 * variance_y
        ellipse = Elp((mean_x, mean_y),
                      2 * ell_radius_x * scale_x,
                      2 * ell_radius_y * scale_y,
                      angle=np.degrees(alpha),
                      facecolor='none',
                      edgecolor='red')
        fig, ax = plt.subplots()

        ax.add_artist(ellipse)
        plt.plot(self.x, self.y, 'o')
        plt.xlim(-4, 4)
        plt.ylim(-4, 4)
        plt.xlabel('x')
        plt.ylabel('y')
        plt.title('Size = ' + str(self.size) + ', rho = ' + str(self.rho))
        plt.savefig('images/' + 'Ellipse' + str(self.size) + 'r' +
                    str(self.rho) + '.png')
        plt.show()
        return
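
For reference, the radii and rotation computed above are the standard confidence-ellipse formulas for a bivariate normal, with $r$ the Pearson estimate and $\sigma_x, \sigma_y$ the sample standard deviations:

\[
r_x = \sqrt{1 + r}, \qquad r_y = \sqrt{1 - r}, \qquad
\alpha = \frac{1}{2}\arctan\frac{2 r \sigma_x \sigma_y}{\sigma_x^2 - \sigma_y^2},
\]

with the half-axes then scaled by $3\sigma_x$ and $3\sigma_y$ to cover roughly the 3-sigma region.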
Code example #15
    def main(self):
        hg = Histogram(self.options)
        hg.hist()
        cr = Correlation(self.options)
        cr.corr()
Code example #16
File: stereocnn.py Project: ShreyanGupta/L2LS
    def forward(self, l, r):
        phi_left = self.unary(l)
        phi_right = self.unary(r)
        corr = Correlation(self.k)(phi_left, phi_right)
        return corr
Code example #17
def print_correlation(sample, name, size, rho):
    print(name + ', ' + 'size = ' + str(size) + ', ' + 'rho = ' + str(rho))
    print(Correlation.mean(sample))
    print(Correlation.square_mean(sample))
    print(Correlation.variance(sample))
    print()
Code example #18
import matplotlib.pyplot as plt
from dataset import Dataset
from correlation import Correlation

selected_dataset = int(input('Informe o dataset:'))
data = Dataset(selected_dataset).get_dataset()
correlation_value = Correlation(data).calc()
print('Correlação')
print(correlation_value)
plt.scatter(data[0], data[1])
plt.show()
Code example #19
import os

from tqdm import tqdm
from torch.optim import lr_scheduler
from model import mainnet
from seg_dynamic import seg_dynamic
from seg_static import seg_static
from dataloader import UAVDatasetTuple
from utils import visualize_sum_testing_result
from correlation import Correlation
from auc import auc

image_saving_dir = '/home/zzhao/data/uav_regression/'

os.environ["CUDA_VISIBLE_DEVICES"] = "1"

init_cor = Correlation()
pred_cor = Correlation()


def train(model, train_loader, device, optimizer, criterion, epoch,
          batch_size):
    model.train()
    sum_running_loss = 0.0
    loss_mse = 0.0
    num_images = 0

    for batch_idx, data in enumerate(tqdm(train_loader)):
        optimizer.zero_grad()
        task = data['task'].to(device).float()
        task_label = data['task_label'].to(device).float()
        #print("task shape", task.shape)
Code example #20
def val_continuous(path, model, test_loader, device, criterion, epoch,
                   batch_size):
    model.eval()
    sum_running_loss = 0.0
    prediction_output_segment = []
    label_output_segment = []
    init_output_segment = []

    with torch.no_grad():
        for batch_idx, data in enumerate(tqdm(test_loader)):

            task_label = data['task_label'].to(device).float()

            # All black
            # init = data['init']
            # init[:] = 0
            # init = init.to(device).float()

            # Normal
            init = data['init'].to(device).float()

            # print("init shape", init.shape)
            label = data['label'].to(device).float()

            prediction = np.zeros(label[:, 1, :, :].shape)

            for i in range(label.shape[1]):

                # model prediction
                if i == 0:
                    task_label_input = task_label[:, i, :, :, :]
                    init_input = init[:, i, :, :]
                    prediction = model(subx=task_label_input, mainx=init_input)
                else:
                    task_label_input = task_label[:, i, :, :, :]
                    prediction = prediction[:, None, :, :]
                    init_input = prediction

                    prediction = model(subx=task_label_input, mainx=init_input)
                # loss
                loss_mse = criterion(prediction, label[:, i, :, :].data)
                # print (loss_mse)

                # accumulate loss
                sum_running_loss += loss_mse.item() * init.size(0)

                # visualize the sum testing result
                visualize_sum_testing_result_cont(path, init_input, prediction,
                                                  task_label[:, i, :, :, :],
                                                  label[:, i, :, :].data,
                                                  batch_idx, epoch, batch_size,
                                                  i)

                prediction_temp = prediction.cpu().detach().numpy()
                label_temp = label[:, i, :, :].cpu().detach().numpy()
                init_temp = init[:, i, :, :].cpu().detach().numpy()

                # save all prediction, label, init results
                if batch_idx == 0 and i == 0:
                    prediction_output = prediction_temp
                    label_output = label_temp
                    init_output = init_temp
                else:
                    prediction_output = np.append(prediction_output,
                                                  prediction_temp,
                                                  axis=0)
                    label_output = np.append(label_output, label_temp, axis=0)
                    init_output = np.append(init_output, init_temp, axis=0)

                # save segment prediction, label, init results
                if batch_idx == 0:
                    prediction_output_segment.append(prediction_temp)
                    label_output_segment.append(label_temp)
                    init_output_segment.append(init_temp)
                else:
                    prediction_output_segment[i] = np.append(
                        prediction_output_segment[i], prediction_temp, axis=0)
                    label_output_segment[i] = np.append(
                        label_output_segment[i], label_temp, axis=0)
                    init_output_segment[i] = np.append(init_output_segment[i],
                                                       init_temp,
                                                       axis=0)

    sum_running_loss = sum_running_loss / (len(test_loader.dataset) *
                                           label.shape[1])
    print('\nTesting phase: epoch: {} Loss: {:.4f}\n'.format(
        epoch, sum_running_loss))

    # save auroc result
    # auc_path = os.path.join(path, "epoch_" + str(epoch))
    # auc(['flow'], [2, 4, 10, 100], [[label_output, prediction_output]], auc_path, epoch)

    # save correlation result
    correlation_path = path
    cor_path = os.path.join(correlation_path, "epoch_" + str(epoch))
    correlation_pred_label = pred_cor.corrcoef(
        prediction_output, label_output, cor_path,
        "correlation_{0}.png".format(epoch))
    correlation_init_label = init_cor.corrcoef(
        init_output, label_output, cor_path,
        "correlation_init_label_{0}.png".format(epoch))
    print('correlation coefficient : {0}\n'.format(correlation_pred_label))
    print('correlation_init_label coefficient : {0}\n'.format(
        correlation_init_label))

    for i in range(len(prediction_output_segment)):
        init_seg_cor = Correlation()
        pred_seg_cor = Correlation()
        correlation_pred_label = pred_seg_cor.corrcoef(
            prediction_output_segment[i], label_output_segment[i], cor_path,
            "correlation_{0}_{1}.png".format(epoch, i))
        correlation_init_label = init_seg_cor.corrcoef(
            init_output_segment[i], label_output_segment[i], cor_path,
            "correlation_init_label_{0}_{1}.png".format(epoch, i))
        print('correlation coefficient segment {0} : {1}\n'.format(
            i, correlation_pred_label))
        print('correlation_init_label coefficient segment {0} : {1}\n'.format(
            i, correlation_init_label))
    return sum_running_loss, prediction_output, label_output, init_output
Code example #21
File: main.py Project: mchamberland/DVH-Analytics
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Bokeh component classes
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

# Categories map of dropdown values, SQL column, and SQL table (and data source for range_categories)
categories = Categories(sources)

# Bokeh table objects
data_tables = DataTables(sources)

# Bokeh objects for each tab layout
planning_data = PlanningData(custom_title, data_tables)
roi_viewer = ROI_Viewer(sources, custom_title)
mlc_analyzer = MLC_Analyzer(sources, custom_title, data_tables)
time_series = TimeSeries(sources, categories.range, custom_title, data_tables)
correlation = Correlation(sources, categories, custom_title)
regression = Regression(sources, time_series, correlation,
                        categories.multi_var_reg_var_names, custom_title,
                        data_tables)
correlation.add_regression_link(regression)
rad_bio = RadBio(sources, time_series, correlation, regression, custom_title,
                 data_tables)
dvhs = DVHs(sources, time_series, correlation, regression, custom_title,
            data_tables)
query = Query(sources, categories, dvhs, rad_bio, roi_viewer, time_series,
              correlation, regression, mlc_analyzer, custom_title, data_tables)
dvhs.add_query_link(query)

# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Listen for changes to sources
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Code example #22
File: focal.py Project: qian-liu/benchmarking
    def __init__(self):
        self.kernels = DifferenceOfGaussians()
        self.correlations = Correlation(self.kernels.full_kernels)
        self.convolver = Convolution()
        self.MIN_IMG_WIDTH = 256
Code example #23
    def predict(self):
        X = self.__X_test.drop('rent_amount_boxcox', axis=1)
        y = self.__X_test['rent_amount_boxcox']
        ypred = self.__xgbRegression.predict(X)
        print('MAE:', metrics.mean_absolute_error(y, ypred))
        print('MSE:', metrics.mean_squared_error(y, ypred))
        print('RMSE:', np.sqrt(metrics.mean_squared_error(y, ypred)))
        print('r2_score:', metrics.r2_score(y, ypred))  

    """ def test(self, lambda_):
        test_data = np.array([2, 1, 4, 2, 3, 0, 1, 0, 1, 5, 3, 2, 10])
        ypred = self.__linearRegression.predict(test_data)
        scipy.special.inv_boxcox(ypred, lambda_) """


start = time.time()
clean_data = CleanData("house_price.csv")
data = clean_data.fit()
encode_data = EncodeData(data)  
data = encode_data.fit()
corr = Correlation(data)
data = corr.corr_fit()
split = SplitData(data)
X, x = split.fit()
para_x = split.getParameters()
print(para_x)
xgb = XGBReg(X, x)
xgb.fit_()
xgb.predict()

print("Total time taken:", time.time() - start)
Code example #24
    def __init__(self, md=4):
        """
        input: md -- maximum displacement for the correlation layer
                     (default: 4), applied after warping
        """
        super(PWCDCNet, self).__init__()

        self.conv1a = conv(3, 16, kernel_size=3, stride=2)
        self.conv1aa = conv(16, 16, kernel_size=3, stride=1)
        self.conv1b = conv(16, 16, kernel_size=3, stride=1)
        self.conv2a = conv(16, 32, kernel_size=3, stride=2)
        self.conv2aa = conv(32, 32, kernel_size=3, stride=1)
        self.conv2b = conv(32, 32, kernel_size=3, stride=1)
        self.conv3a = conv(32, 64, kernel_size=3, stride=2)
        self.conv3aa = conv(64, 64, kernel_size=3, stride=1)
        self.conv3b = conv(64, 64, kernel_size=3, stride=1)
        self.conv4a = conv(64, 96, kernel_size=3, stride=2)
        self.conv4aa = conv(96, 96, kernel_size=3, stride=1)
        self.conv4b = conv(96, 96, kernel_size=3, stride=1)
        self.conv5a = conv(96, 128, kernel_size=3, stride=2)
        self.conv5aa = conv(128, 128, kernel_size=3, stride=1)
        self.conv5b = conv(128, 128, kernel_size=3, stride=1)
        self.conv6aa = conv(128, 196, kernel_size=3, stride=2)
        self.conv6a = conv(196, 196, kernel_size=3, stride=1)
        self.conv6b = conv(196, 196, kernel_size=3, stride=1)

        self.corr = Correlation(pad_size=md,
                                kernel_size=1,
                                max_displacement=md,
                                stride1=1,
                                stride2=1,
                                corr_multiply=1)
        self.leakyRELU = nn.LeakyReLU(0.1)

        nd = (2 * md + 1)**2  # cost-volume channels: 81 for md = 4
        dd = np.cumsum([128, 128, 96, 64, 32])  # [128, 256, 352, 416, 448]

        od = nd
        self.conv6_0 = conv(od, 128, kernel_size=3, stride=1)
        self.conv6_1 = conv(od + dd[0], 128, kernel_size=3, stride=1)
        self.conv6_2 = conv(od + dd[1], 96, kernel_size=3, stride=1)
        self.conv6_3 = conv(od + dd[2], 64, kernel_size=3, stride=1)
        self.conv6_4 = conv(od + dd[3], 32, kernel_size=3, stride=1)
        self.predict_flow6 = predict_flow(od + dd[4])
        # self.deconv6 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
        self.deconv6 = deconv(4, 4, kernel_size=4, stride=2, padding=1)
        # self.upfeat6 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
        self.upfeat6 = deconv(od + dd[4],
                              4,
                              kernel_size=4,
                              stride=2,
                              padding=1)

        od = nd + 128 + 4 + 4  # 4 for up_flow6 and 4 for up_feat6
        # od = nd+128+4
        self.conv5_0 = conv(od, 128, kernel_size=3, stride=1)
        self.conv5_1 = conv(od + dd[0], 128, kernel_size=3, stride=1)
        self.conv5_2 = conv(od + dd[1], 96, kernel_size=3, stride=1)
        self.conv5_3 = conv(od + dd[2], 64, kernel_size=3, stride=1)
        self.conv5_4 = conv(od + dd[3], 32, kernel_size=3, stride=1)
        self.predict_flow5 = predict_flow(od + dd[4])
        # self.deconv5 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
        self.deconv5 = deconv(4, 4, kernel_size=4, stride=2, padding=1)
        # self.upfeat5 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
        self.upfeat5 = deconv(od + dd[4],
                              4,
                              kernel_size=4,
                              stride=2,
                              padding=1)

        od = nd + 96 + 4 + 4  # 4 for up_flow5 and 4 for up_feat5
        # od = nd+96+4
        self.conv4_0 = conv(od, 128, kernel_size=3, stride=1)
        self.conv4_1 = conv(od + dd[0], 128, kernel_size=3, stride=1)
        self.conv4_2 = conv(od + dd[1], 96, kernel_size=3, stride=1)
        self.conv4_3 = conv(od + dd[2], 64, kernel_size=3, stride=1)
        self.conv4_4 = conv(od + dd[3], 32, kernel_size=3, stride=1)
        self.predict_flow4 = predict_flow(od + dd[4])
        # self.deconv4 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
        self.deconv4 = deconv(4, 4, kernel_size=4, stride=2, padding=1)
        # self.upfeat4 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
        self.upfeat4 = deconv(od + dd[4],
                              4,
                              kernel_size=4,
                              stride=2,
                              padding=1)

        od = nd + 64 + 4 + 4  # 4 for up_flow4 and 4 for up_feat4
        # od = nd+64+4
        self.conv3_0 = conv(od, 128, kernel_size=3, stride=1)
        self.conv3_1 = conv(od + dd[0], 128, kernel_size=3, stride=1)
        self.conv3_2 = conv(od + dd[1], 96, kernel_size=3, stride=1)
        self.conv3_3 = conv(od + dd[2], 64, kernel_size=3, stride=1)
        self.conv3_4 = conv(od + dd[3], 32, kernel_size=3, stride=1)
        self.predict_flow3 = predict_flow(od + dd[4])
        # self.deconv3 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
        self.deconv3 = deconv(4, 4, kernel_size=4, stride=2, padding=1)
        # self.upfeat3 = deconv(od+dd[4], 2, kernel_size=4, stride=2, padding=1)
        self.upfeat3 = deconv(od + dd[4],
                              4,
                              kernel_size=4,
                              stride=2,
                              padding=1)

        od = nd + 32 + 4 + 4  # 4 for up_flow3 and 4 for up_feat3
        # od = nd+32+4
        self.conv2_0 = conv(od, 128, kernel_size=3, stride=1)
        self.conv2_1 = conv(od + dd[0], 128, kernel_size=3, stride=1)
        self.conv2_2 = conv(od + dd[1], 96, kernel_size=3, stride=1)
        self.conv2_3 = conv(od + dd[2], 64, kernel_size=3, stride=1)
        self.conv2_4 = conv(od + dd[3], 32, kernel_size=3, stride=1)
        self.predict_flow2 = predict_flow(od + dd[4])
        # self.deconv2 = deconv(2, 2, kernel_size=4, stride=2, padding=1)
        self.deconv2 = deconv(4, 4, kernel_size=4, stride=2, padding=1)

        self.dc_conv1 = conv(od + dd[4],
                             128,
                             kernel_size=3,
                             stride=1,
                             padding=1,
                             dilation=1)
        self.dc_conv2 = conv(128,
                             128,
                             kernel_size=3,
                             stride=1,
                             padding=2,
                             dilation=2)
        self.dc_conv3 = conv(128,
                             128,
                             kernel_size=3,
                             stride=1,
                             padding=4,
                             dilation=4)
        self.dc_conv4 = conv(128,
                             96,
                             kernel_size=3,
                             stride=1,
                             padding=8,
                             dilation=8)
        self.dc_conv5 = conv(96,
                             64,
                             kernel_size=3,
                             stride=1,
                             padding=16,
                             dilation=16)
        self.dc_conv6 = conv(64,
                             32,
                             kernel_size=3,
                             stride=1,
                             padding=1,
                             dilation=1)
        self.dc_conv7 = predict_flow(32)

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                nn.init.kaiming_normal_(m.weight.data, mode='fan_in')
                if m.bias is not None:
                    m.bias.data.zero_()
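
Worked decoder arithmetic for the default md = 4: the level-5 input is od = nd + 128 + 4 + 4 = 81 + 136 = 217 channels, so predict_flow5 sees od + dd[4] = 217 + 448 = 665 input channels; the other pyramid levels follow the same pattern with 96, 64, and 32 in place of 128.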