        # predictor generators: TSSPG + chipPG  # optionally +cagePG+metPG+chipPG

        # Generate training datasets
        train_chrs = ["chr" + chr_num for chr_num in chr_nums]
        if write_all_chrms_in_file:
            train_file_name = "training.RandOn" + str(params)
            params.out_file = (output_folder + "_".join(train_chrs)
                               + train_file_name)
        for trainChrName in train_chrs:
            # set it if you want to use all contacts of chromosome for training:
            # params.sample_size = len(params.contacts_reader.data[trainChrName])

            # if you want to use only an interval of chromosome, set its coordinates:
            params.interval = Interval(
                trainChrName,
                params.contacts_reader.get_min_contact_position(trainChrName),
                params.contacts_reader.get_max_contact_position(trainChrName))

            if not write_all_chrms_in_file:
                train_file_name = "training.RandOn" + str(params) + ".txt"
                params.out_file = (output_folder + params.interval.toFileName()
                                   + train_file_name)
            generate_data(params, saveFileDescription=True)
            if not write_all_chrms_in_file:
                del params.out_file
            del params.sample_size  # assumes sample_size was set for this chromosome earlier
Example 2
        #                  # Interval("chr10",36000000,41000000),
        #                  # Interval("chr1", 100000000, 110000000)]:
        # params.interval = interval
        validate_chrs = ["chr19", "chrX"]
        for validateChrName in validate_chrs:
            params.sample_size = len(
                params.contacts_reader.data[validateChrName])
            #print(params.sample_size)
            validation_file_name = "validatingOrient." + str(params) + ".txt"
            params.interval = Interval(
                validateChrName,
                params.contacts_reader.get_min_contact_position(
                    validateChrName),
                params.contacts_reader.get_max_contact_position(
                    validateChrName))
            logging.getLogger(__name__).info(
                "Generating validation dataset for interval %s", params.interval)
            params.out_file = (output_folder + params.interval.toFileName()
                               + validation_file_name)
            generate_data(params)
            del params.out_file
            del params.sample_size

        # To simulate a deletion, remove a region from the contacts and predictors:
        # for obj in [params.contacts_reader] + params.pgs:
        #     lostInterval = Interval("chr1", 103842568, 104979840)
        #     obj.delete_region(lostInterval)
        #     params.interval = Interval("chr1", 100000000, 109000000)
        #     logging.getLogger(__name__).info(
        #         "Saving data to file " + params.interval.toFileName()
        #         + "DEL." + lostInterval.toFileName() + validation_file_name)
        #     params.out_file = (params.interval.toFileName() + "DEL."
        #                        + lostInterval.toFileName() + validation_file_name)
        #     generate_data(params)
Example 3
import numpy as np

# Training parameters
data_batch_size = 32
mask_batch_size = 32
# the effective batch size is data_batch_size * mask_batch_size (32 * 32 = 1024)
s = 5  # size of optimal subset that we are looking for
s_p = 2  # number of flipped bits in a mask when looking around m_opt
phase_2_start = 6000  # after how many batches phase 2 begins
max_batches = 15000  # how many batches to run if the early stopping condition is never met
early_stopping_patience = 600  # patience batches (after phase 2 starts) before training stops
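
# A minimal sketch (an assumption, not part of this project) of how the
# schedule parameters above could drive a two-phase loop with early stopping.
# train_one_batch() is a hypothetical stand-in that runs one optimization
# step over data_batch_size * mask_batch_size samples and returns a
# validation loss; the real training code lives elsewhere.
import random

def train_one_batch(phase):
    """Hypothetical stand-in for one optimization step."""
    return random.random()

def run_training_sketch():
    best_loss = float("inf")
    stale = 0  # batches since the last improvement
    for batch_idx in range(max_batches):
        phase = 2 if batch_idx >= phase_2_start else 1
        val_loss = train_one_batch(phase)
        if phase == 2:  # early stopping is only armed once phase 2 starts
            if val_loss < best_loss:
                best_loss, stale = val_loss, 0
            else:
                stale += 1
            if stale >= early_stopping_patience:
                break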

# Generate data for the XOR dataset:
# the first three features define the target (y);
# all remaining features are Gaussian noise (10 features in total)
X_tr, y_tr = generate_data(n=N_TRAIN_SAMPLES, seed=0)
X_val, y_val = generate_data(n=N_VAL_SAMPLES, seed=1)  # distinct seeds so the
X_te, y_te = generate_data(n=N_TEST_SAMPLES, seed=2)   # three splits differ
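
# generate_data itself is defined elsewhere in this project; purely as an
# assumed illustration of the comments above (3 informative bits mapped to a
# 4-class XOR-style target, Gaussian noise padding up to 10 features), a
# stand-in could look like this:
def generate_data_sketch(n, seed=0):
    rng = np.random.default_rng(seed)
    bits = rng.integers(0, 2, size=(n, 3))  # informative features
    noise = rng.normal(size=(n, 7))         # pure-noise features
    # one assumed way to build a 4-class target from three bits
    y = 2 * (bits[:, 0] ^ bits[:, 1]) + (bits[:, 1] ^ bits[:, 2])
    X = np.hstack([bits.astype(float), noise])
    return X, y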

# Get one hot encoding of the labels
y_tr = get_one_hot(y_tr.astype(np.int8), 4)
y_te = get_one_hot(y_te.astype(np.int8), 4)
y_val = get_one_hot(y_val.astype(np.int8), 4)
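
# get_one_hot is likewise defined elsewhere; one common implementation that is
# consistent with the calls above (integer labels in, an (n, nb_classes)
# matrix out) would be:
def get_one_hot_sketch(targets, nb_classes):
    # row lookup into an identity matrix: label i becomes the i-th unit vector
    return np.eye(nb_classes)[np.asarray(targets).reshape(-1)]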

# Create the framework; it needs the number of features, the batch sizes,
# and a str_id for TensorBoard
fs = FeatureSelector(FEATURE_SHAPE,
                     s,
                     data_batch_size,
                     mask_batch_size,
                     str_id=dataset_label)
Example 4
import pickle

import matplotlib.pyplot as plt
import seaborn as sns

from DataGenerator import generate_data
from ShapeGenerator import generate_images

# Generate four image sets and their corresponding data files
for i in range(1, 5):
    file_name = 'shapes_' + str(i)
    generate_images(file_name)
    generate_data(file_name)

# Plot results for the files generated above (range kept in sync with that loop)
for i in range(1, 5):
    with open('Data/shapes_' + str(i) + '_data.pkl', 'rb') as f:
        results = pickle.load(f)
    sns.boxplot(x='Standard Deviation',
                y='Sørensen–Dice Coefficient',
                hue='Method',
                data=results)
    plt.show()
Example 5
        else:
            #print("Route flow " + str(i) + " through " + str(r_star))
            pie[i] = fl[i] - (fl[i] * k_r)
            throughput = throughput + fl_e[i]

            for edge in ce:
                if edge in gre[ind]:
                    lamb[edge] = (lamb[edge]
                                  + lamb[edge] * float(fl[i] * gre[ind][edge] / ce[edge])
                                  + float(fl[i] * gre[ind][edge] / (chi * ce[edge])))

            for mbox in pm:
                if mbox in qrm[ind]:
                    theta[mbox] = (theta[mbox]
                                   + theta[mbox] * float(fl[i] * qrm[ind][mbox] / pm[mbox])
                                   + float(fl[i] * qrm[ind][mbox] / (chi * pm[mbox])))

    print("Throughput: " + str(throughput))

if __name__ == '__main__':
    rls, gre, ce, fl, pm, qrm, fl_e, fl_pm = generate_data()  # fl_pm is unused below
    # print(rls)
    # print(gre)
    # print(ce)
    # print(fl)
    # print(pm)
    # print(qrm)


    pda(rls, gre, ce, fl, pm, qrm, fl_e)
    total = sum(fl_e)  # renamed from `sum` to avoid shadowing the built-in
    print("Maximum Throughput: " + str(total))