Example #1
    def train(self):
        self.sess.run(tf.global_variables_initializer())
        self.itr_epoch = len(self.source_training_data[0]) // self.bs
        self.total_iteration = self.eps * self.itr_epoch

        training_acc = 0.0
        training_loss = 0.0

        for itr in range(1, self.total_iteration + 1):
            _tr_img_batch, _tr_lab_batch = init.next_batch(image=self.source_training_data[0],
                                                           label=self.source_training_data[1],
                                                           batch_size=self.bs)

            _train_accuracy, _train_loss, _ = self.sess.run([self.accuracy, self.loss, self.train_op],
                                                            feed_dict={self.x: _tr_img_batch,
                                                                       self.y: _tr_lab_batch,
                                                                       self.is_training: True})
            training_acc += _train_accuracy
            training_loss += _train_loss

            if itr % self.itr_epoch == 0:
                _current_eps = int(itr / self.itr_epoch)
                summary = self.sess.run(self.merged, feed_dict={self.x: _tr_img_batch,
                                                                self.y: _tr_lab_batch,
                                                                self.is_training: False})

                training_acc = float(training_acc / self.itr_epoch)
                training_loss = float(training_loss / self.itr_epoch)

                validation_acc, validation_loss = eval.validation_procedure(validation_data=self.source_validation_data,
                                                                            distribution_op=self.distribution,
                                                                            loss_op=self.loss, inputX=self.x,
                                                                            inputY=self.y, num_class=self.num_class,
                                                                            batch_size=self.bs,
                                                                            is_training=self.is_training,
                                                                            session=self.sess)

                log = "Epoch: [%d], Training Accuracy: [%g], Validation Accuracy: [%g], Loss Training: [%g], " \
                      "Loss_validation: [%g], Time: [%s]" % \
                      (_current_eps, training_acc, validation_acc, training_loss, validation_loss,
                       time.ctime(time.time()))

                init.save2file(log, self.ckptDir, self.model)

                self.writer.add_summary(summary, _current_eps)

                self.saver.save(self.sess, self.ckptDir + self.model + '-' + str(_current_eps))

                eval.test_procedure(test_data=self.source_test_data, distribution_op=self.distribution, inputX=self.x,
                                    inputY=self.y, mode='source', num_class=self.num_class, batch_size=self.bs,
                                    session=self.sess, is_training=self.is_training, ckptDir=self.ckptDir,
                                    model=self.model)

                eval.test_procedure(test_data=self.target_test_data, distribution_op=self.distribution, inputX=self.x,
                                    inputY=self.y, mode='target', num_class=self.num_class, batch_size=self.bs,
                                    session=self.sess, is_training=self.is_training, ckptDir=self.ckptDir,
                                    model=self.model)

                training_acc = 0.0
                training_loss = 0.0
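Most of the training loops in these examples draw batches with init.next_batch(image, label, batch_size), which is not shown here. A minimal sketch of what such a helper might look like, assuming the images and labels are NumPy arrays with matching first dimensions (hypothetical, not the repository's actual implementation):

import numpy as np

def next_batch(image, label, batch_size):
    # Hypothetical sketch: sample a random mini-batch of paired images and labels.
    idx = np.random.choice(image.shape[0], size=batch_size, replace=False)
    return image[idx], label[idx]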
Example #2
    def train_step1(self):
        self.sess.run(tf.global_variables_initializer())
        self.itr_epoch = len(self.source_training_data[0]) // self.bs

        for e in range(1, self.eps + 1):
            for itr in range(self.itr_epoch):
                feed_dict_train, feed_dict_eval = self.getBatchData_1()
                _, _, _, _ = self.sess.run([
                    self.G_trainOp, self.F_trainOp, self.DX_trainOp,
                    self.DY_trainOp
                ],
                                           feed_dict=feed_dict_train)

            summary = self.sess.run(self.merged, feed_dict=feed_dict_eval)

            G_g_loss, F_g_loss, DX_loss, DY_loss, Cycle_loss = self.sess.run(
                [
                    self.G_g_loss, self.F_g_loss, self.DX_loss, self.DY_loss,
                    self.cycle_loss
                ],
                feed_dict=feed_dict_eval)

            log1 = "Epoch: [%d], G_g_loss: [%g], F_g_loss: [%g], DX_loss: [%g], DY_loss: [%g], Cycle_loss: [%g], " \
                   "Time: [%s]" % (e, G_g_loss, F_g_loss, DX_loss, DY_loss, Cycle_loss, time.ctime(time.time()))

            init.save2file(log1, self.ckptDir, self.model)

            self.writer.add_summary(summary, e)

            self.saver.save(self.sess,
                            self.ckptDir + self.model + '-' + str(e))
Example #3
def main(args):
    setup_gpu(args.gpu_id)

    # Creating embedding function. This corresponds to the function g in the paper.
    # You may need to change the network parameters.
    model1 = Initialization.Create_Model()

    # size of digits 16*16
    img_rows, img_cols = 16, 16
    input_shape = (img_rows, img_cols, 1)
    input_a = Input(shape=input_shape)
    input_b = Input(shape=input_shape)

    # number of classes for digits classification
    nb_classes = 10

    # Loss = (1-alpha)Classification_Loss + (alpha)CSA
    alpha = .25

    # Two streams: one for the source domain, one for the target domain.
    processed_a = model1(input_a)
    processed_b = model1(input_b)

    # Creating the prediction function. This corresponds to h in the paper.
    out1 = Dropout(0.5)(processed_a)
    out1 = Dense(nb_classes)(out1)
    out1 = Activation('softmax', name='classification')(out1)

    distance = Lambda(Initialization.euclidean_distance,
                      output_shape=Initialization.eucl_dist_output_shape,
                      name='CSA')([processed_a, processed_b])

    model = Model(inputs=[input_a, input_b], outputs=[out1, distance])
    model.compile(loss={
        'classification': 'categorical_crossentropy',
        'CSA': Initialization.contrastive_loss
    },
                  optimizer='adadelta',
                  loss_weights={
                      'classification': 1 - alpha,
                      'CSA': alpha
                  })

    print('Domain Adaptation Task: ' + args.domain_adaptation_task)
    # let's create the positive and negative pairs using raw data.
    # pairs will be saved in ./pairs directory
    Initialization.Create_Pairs(args.domain_adaptation_task, args.repetition,
                                args.sample_per_class)
    Acc = Initialization.training_the_model(model, args.domain_adaptation_task,
                                            args.repetition,
                                            args.sample_per_class)

    print(
        'Best accuracy for {} target sample per class and repetition {} is {}.'
        .format(args.sample_per_class, args.repetition, Acc))
Example #4
 def __init__(self,name,shape,l2reg=0,init_method='glorot_uniform'):
     '''
     Currently only one initialization method is supported.
     :param shape: [in_dim, out_dim]
     :param l2reg:
     :param init_method:
     _W and _b are both arrays
     '''
     self._l2reg=l2reg
     self._W = Initialization.get_global_init(init_method)(shape)
     self._b = Initialization.get_global_init('zero')(shape[1])
     self._dW=None
     self._db=None
     self._last_input=None
     self._name=name
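The constructor above expects Initialization.get_global_init(name) to return a callable that maps a shape to a NumPy array (so that _W and _b come back as plain arrays). A rough sketch under that assumption, covering only the 'glorot_uniform' and 'zero' initializers referenced here (hypothetical):

import numpy as np

def get_global_init(name):
    # Hypothetical factory: returns a function mapping shape -> ndarray.
    def glorot_uniform(shape):
        # Glorot/Xavier uniform: limit = sqrt(6 / (fan_in + fan_out)).
        limit = np.sqrt(6.0 / (shape[0] + shape[-1]))
        return np.random.uniform(-limit, limit, size=shape)

    def zero(shape):
        return np.zeros(shape)

    return {'glorot_uniform': glorot_uniform, 'zero': zero}[name]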
Example #5
def Main():
    """
        Main function
    """

    # Initialize game
    Initialization.ShowTitleAndRules()
    Initialization.GameInitialization()
    Train.ShowUserInterface()

    # Main game loop
    while Variables.GameInProgress:
        Train.AskUserAction()

    print("\nAu revoir.\n")
Example #6
 def __init__(self,node_set):
     self.node_set=node_set
     
     #Initialize all the variables
     Initialization.Initialization(self.node_set)
     
     self.solver=Solver(self.node_set)
Example #7
def ShowUserInterface(ClearTheConsole = True):
    """
        This function draws the railroad on the screen
    """

    # show title and rules
    Initialization.ShowTitleAndRules(ClearTheConsole)

    # show railroad data
    print("\nTableau de bord")
    print("---------------\n")
    print(f"Longueur de la voie ferrée : {len(Variables.Railroad)}")
    print(f"Nombre de stations d'énergie : {Variables.EnergyPodNumber}, énergie consommée par déplacement à vide ({Variables.EnergyConsumptionByMovement}) puis par caisse chargée ({Variables.EnergyConsumptionByMovementByLoadedCrate}), et par caisse (dé)chargée ({Variables.EnergyConsumptionByCrate})")
    print(f"Nombre de caisses : à charger ({Variables.NumberOfCratesToLoad}) - livrées ({Variables.NumberOfCratesDelivered})")
    print(f"Train - Position : {Variables.TrainPosition} (sur {Utilities.GetSymbolName(Variables.SymbolUnderTrain)} - {Variables.SymbolUnderTrain}) - Énergie : {Variables.TrainCurrentEnergy}/{Variables.TrainMaxEnergy} - Charge : {Variables.TrainCurrentLoad}/{Variables.TrainMaxLoad}")
    print(f"Activités du train : {Variables.TrainMovements} déplacements et {Variables.TrainActions} actions")
    # show history (last 10 entries only, via slicing)
    print(f"Historique des 10 dernières actions : {', '.join(Variables.InstructionsHistory[:-11:-1])}")
    print()

    # draw railroad
    # print(f"\nVoie ferrée {len(Railroad)}\n{''.join(Railroad)}\n")
    print(''.join(Variables.Railroad))

    # check victory or defeat
    CheckVictoryOrDefeat()
Example #8
    def build_VGAE(self):

        #Initialize Weights
        self.W_0_mu = Initialization.unif_weight_init(
            shape=[self.n_nodes, self.n_hidden])
        self.W_1_mu = Initialization.unif_weight_init(
            shape=[self.n_hidden, self.n_hidden])

        self.W_0_sigma = Initialization.unif_weight_init(
            shape=[self.n_nodes, self.n_hidden])
        self.W_1_sigma = Initialization.unif_weight_init(
            shape=[self.n_hidden, self.n_hidden])

        #Compute Graph Convolutional Layers for the mean parameter
        hidden_0_mu_ = Initialization.gcn_layer_id(self.norm_adj_mat,
                                                   self.W_0_mu)
        hidden_0_mu = tf.nn.dropout(hidden_0_mu_, self.keep_prob)
        self.mu = Initialization.gcn_layer(self.norm_adj_mat, hidden_0_mu,
                                           self.W_1_mu)

        #Compute Graph Convolutional Layers for the variance  parameter
        hidden_0_sigma_ = Initialization.gcn_layer_id(self.norm_adj_mat,
                                                      self.W_0_sigma)
        hidden_0_sigma = tf.nn.dropout(hidden_0_sigma_, self.keep_prob)
        log_sigma = Initialization.gcn_layer(self.norm_adj_mat, hidden_0_sigma,
                                             self.W_1_sigma)
        self.sigma = tf.exp(log_sigma)

        #Latent loss function, given by the KL divergence (closed form)
        self.latent_loss = -(0.5 / self.n_nodes) * tf.reduce_mean(
            tf.reduce_sum(
                1 + 2 * tf.log(self.sigma) - tf.square(self.mu) -
                tf.square(self.sigma), 1))

        #Reconstruction Loss. We use the weighted cross_entropy to take into account the sparsity of A
        dense_adjacency = tf.reshape(
            tf.sparse_tensor_to_dense(self.adjacency, validate_indices=False),
            self.shape)
        w_1 = (self.n_nodes * self.n_nodes -
               tf.reduce_sum(dense_adjacency)) / tf.reduce_sum(dense_adjacency)
        w_2 = self.n_nodes * self.n_nodes / (self.n_nodes * self.n_nodes -
                                             tf.reduce_sum(dense_adjacency))
        self.reconst_loss = w_2 * tf.reduce_mean(
            tf.nn.weighted_cross_entropy_with_logits(
                targets=dense_adjacency, logits=self.decode(), pos_weight=w_1))

        #Loss Function
        self.loss = self.reconst_loss + self.latent_loss

        #Optimizer
        self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
        self.train_step = self.optimizer.minimize(self.loss)

        #Variables Initializer
        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)
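build_VGAE relies on three helpers from Initialization that are not shown: unif_weight_init, gcn_layer_id and gcn_layer. A plausible sketch, assuming norm_adj_mat is a tf.SparseTensor holding the normalized adjacency matrix and that the first layer uses a ReLU while the second is linear (both assumptions, not confirmed by the example):

import numpy as np
import tensorflow as tf

def unif_weight_init(shape, name=None):
    # Hypothetical: weight variable with uniform (Glorot-style) initialization.
    limit = np.sqrt(6.0 / (shape[0] + shape[1]))
    return tf.Variable(tf.random_uniform(shape, minval=-limit, maxval=limit), name=name)

def gcn_layer_id(norm_adj_mat, W):
    # First GCN layer with identity features: relu(A_norm . W).
    return tf.nn.relu(tf.sparse_tensor_dense_matmul(norm_adj_mat, W))

def gcn_layer(norm_adj_mat, hidden, W):
    # Later GCN layer: A_norm . H . W (no activation assumed here).
    return tf.sparse_tensor_dense_matmul(norm_adj_mat, tf.matmul(hidden, W))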
Example #9
    def getBatchData(self):
        _src_tr_img_batch, _src_tr_lab_batch = init.next_batch(self.source_training_data[0],
                                                               self.source_training_data[1], self.bs)
        _tar_tr_img_batch = init.next_batch_unpaired(self.target_training_data, self.bs)

        feed_dict = {self.x_source: _src_tr_img_batch,
                     self.y_source: _src_tr_lab_batch,
                     self.x_target: _tar_tr_img_batch,
                     self.is_training: True,
                     self.keep_rate: 0.5}
        feed_dict_eval = {self.x_source: _src_tr_img_batch,
                          self.y_source: _src_tr_lab_batch,
                          self.x_target: _tar_tr_img_batch,
                          self.is_training: False,
                          self.keep_rate: 0.5}

        return feed_dict, feed_dict_eval
Example #10
    def test_procedure(self, test_data, distribution_op, inputX, inputX_,
                       inputY, mode, num_class, batch_size, session,
                       is_training, ckptDir, model):
        confusion_matrics = np.zeros([num_class, num_class], dtype="int")

        tst_batch_num = int(np.ceil(test_data[0].shape[0] / batch_size))
        for step in range(tst_batch_num):
            _testImg = test_data[0][step * batch_size:step * batch_size +
                                    batch_size]
            _testLab = test_data[1][step * batch_size:step * batch_size +
                                    batch_size]

            matrix_row, matrix_col = session.run(distribution_op,
                                                 feed_dict={
                                                     inputX: _testImg,
                                                     inputX_: _testImg,
                                                     inputY: _testLab,
                                                     is_training: False
                                                 })
            for m, n in zip(matrix_row, matrix_col):
                confusion_matrics[m][n] += 1

        test_accuracy = float(
            np.sum([confusion_matrics[q][q] for q in range(num_class)
                    ])) / float(np.sum(confusion_matrics))
        detail_test_accuracy = [
            confusion_matrics[i][i] / np.sum(confusion_matrics[i])
            for i in range(num_class)
        ]
        log0 = "Mode: " + mode
        log1 = "Test Accuracy : %g" % test_accuracy
        log2 = np.array(confusion_matrics.tolist())
        log3 = ''
        for j in range(num_class):
            log3 += 'category %s test accuracy : %g\n' % (
                init.pulmonary_category[j], detail_test_accuracy[j])
        log3 = log3[:-1]
        log4 = 'F_Value : %g\n' % eval.f_value(confusion_matrics, num_class)

        init.save2file(log0, ckptDir, model)
        init.save2file(log1, ckptDir, model)
        init.save2file(log2, ckptDir, model)
        init.save2file(log3, ckptDir, model)
        init.save2file(log4, ckptDir, model)
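test_procedure ends by logging eval.f_value(confusion_matrics, num_class), whose definition is not shown. A common choice is a macro-averaged F1 score computed from the confusion matrix; a sketch under that assumption, with rows taken as true classes and columns as predictions (hypothetical):

import numpy as np

def f_value(confusion_matrix, num_class):
    # Hypothetical sketch: macro-averaged F1 over all classes.
    scores = []
    for c in range(num_class):
        tp = confusion_matrix[c, c]
        precision = tp / max(np.sum(confusion_matrix[:, c]), 1)
        recall = tp / max(np.sum(confusion_matrix[c, :]), 1)
        scores.append(0.0 if precision + recall == 0 else
                      2 * precision * recall / (precision + recall))
    return float(np.mean(scores))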
Example #11
    def optimize(self, filename, population_size, its, recom_its, k, alpha):
        # Read distance matrix from file.
        file = open(filename)
        distanceMatrix = np.loadtxt(file, delimiter=",")
        file.close()

        # Your code here.
        population = Initialization.initialize(population_size,
                                               distanceMatrix.shape[0])
        i = 0

        while (its > i):
            old_pop = population

            # Your code here.
            offspring = np.zeros([2 * recom_its, distanceMatrix.shape[0]])
            # Recombination
            for j in range(0, 2 * recom_its, 2):
                parent1 = selection(population, k, distanceMatrix)
                parent2 = selection(population, k, distanceMatrix)
                child1, child2 = Recombination.PMX(parent1, parent2)
                offspring[j] = child1
                offspring[j + 1] = child2

            # Mutation
            for j in range(len(offspring)):
                offspring[j] = mutation(offspring[j], alpha)

            for j in range(len(population)):
                population[j] = mutation(population[j], alpha)

            # Elimination
            population = elimination(population, offspring, distanceMatrix,
                                     population_size)
            print("Score iteration {}".format(i),
                  length(population[0], distanceMatrix))

            bestSolution = population[0]
            bestObjective = length(bestSolution, distanceMatrix)
            meanObjective = np.average([
                length(individual, distanceMatrix) for individual in population
            ])
            # Call the reporter with:
            #  - the mean objective function value of the population
            #  - the best objective function value of the population
            #  - a 1D numpy array in the cycle notation containing the best solution
            #    with city numbering starting from 0
            #timeLeft = self.reporter.report(meanObjective, bestObjective, bestSolution)
            #if timeLeft < 0:
            #    break
            i += 1
            print("mean", meanObjective)
            print("ratio", convergence(old_pop, population, distanceMatrix))

        # Your code here.
        return 0
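The evolutionary loop above gets its starting population from Initialization.initialize(population_size, num_cities). For a tour-based problem like this one (the reporter expects cycle notation with city numbering starting from 0), a minimal sketch might be (hypothetical):

import numpy as np

def initialize(population_size, num_cities):
    # Hypothetical sketch: each individual is a random permutation of the cities.
    return np.array([np.random.permutation(num_cities) for _ in range(population_size)])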
Example #12
 def ga_stuff(self):
     population = Initialization.initialize(population_size, lower_bound, upper_bound, self.degree + 1)
     best_forever = population[0].copy()
     for current_generation in range(generations):
         self.sort_population(population)
         best_of_generation = population[0].copy()
         best_forever = best_of_generation.copy() if self.mse(best_of_generation) < self.mse(
             best_forever) else best_forever
         Crossover.cross(population_size // 2, population, self.x, self.y)
         Mutation.mutate(population, current_generation, generations, depending_factor)
     return best_forever
Example #13
def main(fun):
    N = 100  # population size
    NC = int(N / 5)  # population size used by the immune search
    T = 20  # neighborhood size
    #fun = 'ZDT1'  # test function, e.g. DTLZ2
    f_num, x_num, x_min, x_max, PP = funcition.funcitions(fun)
    max_gen = 3000 * N  # maximum number of evolutionary generations
    pc = 0.9  # crossover probability
    w = 0.1
    pm = 1 / x_num  # mutation probability
    P, A, B, z_star, lambda_ = Initialization.init(N, T, fun, f_num, x_num,
                                                   x_min, x_max, PP)
    gen = 0
    while gen < max_gen:
        S = Immune_Search.immune_search(A, NC, N, pc, pm, x_min, x_max, f_num,
                                        fun)
        gen += len(S)
        A = Archive_Update.archive_update(S, A, N, f_num)
        # z_star =  Initialization.Caculateminobj(A, len(A), f_num)
        # d1,leader=find_leader(N,A,lambda_,z_star)
        # for i in range(N):
        #     sign1=1
        #     sign2=1
        #     pbest = A[int(leader[i])]
        #     l = random.randint(0, len(A) - 1)
        #     gbest = A[l]
        #     ll=random.randint(0,len(B[i])-1)
        #     lbest=A[int(leader[B[i][ll]])]
        #     if pbi(lbest.fitness, lambda_[i],z_star) > pbi(P[i].fitness,lambda_[i],z_star):
        #         sign1 = -1.0
        #     if pbi(gbest.fitness, lambda_[i], z_star) > pbi(P[i].fitness, lambda_[i], z_star):
        #         sign2 = -1.0
        #     v_=np.zeros(len(P[i].v))
        #     for k in range(len(P[i].x)):
        #         C1=random.random()
        #         R1=random.uniform(1.5,2.0)
        #         v_[k] = w * P[i].v[k] + C1*R1* (pbest.x[k] - P[i].x[k]) +sign1*0.5 * (lbest.x[k]-P[i].x[k])+sign2*0.5*(gbest.x[k]-P[i].x[k])
        #         if v_[k]>x_max[k]:
        #             v_[k]=x_min[k]
        #         if (v_[k])<x_min[k]:
        #             v_[k]=x_min[k]
        #     P[i].v=v_
        #     P[i].x = P[i].x + P[i].v
        #     P[i].x = np.clip(P[i].x, a_min=x_min, a_max=x_max)
        #     P[i].fitness = funcition.partical(P[i].x, fun, x_num).fitness
        #     gen += 1
        #     z_star = updatereference(P[i], f_num,z_star)
        print(gen, len(A))
        # A = Archive_Update.archive_update(P, A, N, f_num)
    show(A, f_num)
    # --------------------Coverage(C-metric)---------------------
    PP = np.loadtxt('%s.pf' % (fun))
    B = A
    print(IGD(B, PP))
Example #14
    def getBatchData_1(self):
        _src_tr_img_batch, _src_tr_lab_batch = init.next_batch(
            self.source_training_data[0], self.source_training_data[1],
            self.bs)
        _tar_tr_img_batch = init.next_batch_unpaired(self.target_training_data,
                                                     self.bs)

        feed_dict = {
            self.X: _src_tr_img_batch,
            self.Y: _tar_tr_img_batch,
            self.is_training: True
        }

        feed_dict_eval = {
            self.X: _src_tr_img_batch,
            self.Y: _tar_tr_img_batch,
            self.is_training: False
        }

        return feed_dict, feed_dict_eval
Example #15
 def select_initialization(self, initializer):
     init_object = Initialization.Initialization(self.A, self.y,
                                                 self.param.k,
                                                 self.param.data_type,
                                                 self.param.isComplex)
     if initializer in [
             'init_random', 'init_spectral', 'init_optimal_spectral'
     ]:
         init_func = getattr(init_object, initializer)
         return init_func
     else:
         print('There is no such initializer %s' % initializer)
Example #16
def gatherImages_Labels_NooverlapImages(rootPath, mode):
    pklFilePath = rootPath + mode + '/'

    image_label_pairs = []
    Nooverlap_images = []

    fileNameList = DA_init.getFileNameList(pklFilePath)

    for f in fileNameList:
        _img, _lab, _nooverlap_img = DA_init.loadPickle(pklFilePath, f)

        for _i, _l in zip(_img, _lab):
            image_label_pairs.append([_i, _l])
        Nooverlap_images.append(_nooverlap_img)

    sorted_pairs = DA_init.sortVariousPairs(image_label_pairs)

    img_lib, lab_lib = [], []

    for i in range(len(sorted_pairs)):
        img_lib.append(sorted_pairs[i][0])
        lab_lib.append(sorted_pairs[i][1])

    img_lib = np.array(img_lib, dtype=np.float32)
    lab_lib = np.array(lab_lib, dtype=np.int32)

    img_lib = np.expand_dims(img_lib, axis=3)
    lab_lib = DA_init.onehotEncoder(lab_lib, num_class=6)

    Nooverlap_images = np.concatenate(Nooverlap_images, axis=0)
    Nooverlap_images_lib = np.expand_dims(Nooverlap_images, axis=3)

    print('-' * 20 + mode + ' dataset process finish' + '-' * 20)
    print(
        'Mode %s image lib shape: %s label lib shape: %s nooverlap images shape: %s'
        % (mode, str(img_lib.shape), str(
            lab_lib.shape), str(Nooverlap_images_lib.shape)))

    return img_lib, lab_lib, Nooverlap_images_lib
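This loader calls DA_init.onehotEncoder(lab_lib, num_class=6) before returning the label library. A minimal sketch of such an encoder, assuming integer class labels in a 1-D array (hypothetical):

import numpy as np

def onehotEncoder(labels, num_class):
    # Hypothetical sketch: map integer labels of shape (N,) to a one-hot matrix of shape (N, num_class).
    onehot = np.zeros([labels.shape[0], num_class], dtype=np.float32)
    onehot[np.arange(labels.shape[0]), labels] = 1.0
    return onehot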
Example #17
def checkLegality(chromes,PopInfors):
    matrixs = Initialization.decoding(chromes,PopInfors)
    for i in range(len(matrixs)):
        threeMatrix = matrixs[i]
        coding_matrix = threeMatrix.coding_matrix
        feature_matrix = threeMatrix.feature_matrix
        transpose_code_matrix = np.transpose(coding_matrix)
        legalityExamination(transpose_code_matrix)
        while not checkFeatureMatrixLegality(feature_matrix):
            feature_matrix = fitFeatureMatrix(feature_matrix)  
        matrixs[i].coding_matrix = (np.transpose(transpose_code_matrix)).tolist()
        matrixs[i].feature_matrix = feature_matrix
    return matrixs
Example #18
    def getBatchData(self):
        _src_tr_img_batch, _src_tr_lab_batch = init.next_batch(
            self.source_training_data[0], self.source_training_data[1],
            self.bs)

        feed_dict = {
            self.x_source: _src_tr_img_batch,
            self.y_source: _src_tr_lab_batch,
            self.is_training: True
        }
        feed_dict_eval = {
            self.x_source: _src_tr_img_batch,
            self.y_source: _src_tr_lab_batch,
            self.is_training: False
        }

        return feed_dict, feed_dict_eval
Example #19
    def getBatchData_2(self):
        _src_tr_img_batch, _src_tr_lab_batch = init.next_batch(
            self.source_training_data[0], self.source_training_data[1],
            self.bs)

        feed_dict = {
            self.cla_x: _src_tr_img_batch,
            self.direct_input_x: _src_tr_img_batch,
            self.cla_y: _src_tr_lab_batch,
            self.is_training: True
        }

        feed_dict_eval = {
            self.cla_x: _src_tr_img_batch,
            self.direct_input_x: _src_tr_img_batch,
            self.cla_y: _src_tr_lab_batch,
            self.is_training: False
        }

        return feed_dict, feed_dict_eval
Example #20
 def predict(self):
     '''Returns  predictions for the adjacency matrix A'''
     z = Initialization.sample_gaussian_np(self.mu_np, self.sigma_np)
     matrix_pred = np.dot(z, np.transpose(z))
     return matrix_pred
Example #21
 def latent(self):
     '''Returns a  sample of the latent variable Z'''
     z = Initialization.sample_gaussian_np(self.mu_np, self.sigma_np)
     return z
Example #22
 def encode(self):
     '''Generates a latent representation'''
     return Initialization.sample_gaussian(self.mu, self.sigma)
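Examples #20 to #22 sample the latent variable with Initialization.sample_gaussian (TensorFlow graph) and Initialization.sample_gaussian_np (NumPy). Both presumably apply the reparameterization trick z = mu + sigma * eps with eps ~ N(0, I); a sketch under that assumption (hypothetical):

import numpy as np
import tensorflow as tf

def sample_gaussian(mu, sigma):
    # Hypothetical graph-mode sample: z = mu + sigma * eps, eps ~ N(0, I).
    eps = tf.random_normal(tf.shape(mu))
    return mu + sigma * eps

def sample_gaussian_np(mu_np, sigma_np):
    # Hypothetical NumPy counterpart used at prediction time.
    eps = np.random.normal(size=mu_np.shape)
    return mu_np + sigma_np * eps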
Example #23
    def evaluate(self, bi, wallet_distribution_type, sample_seed_set, ss_time):
        eva_start_time = time.time()
        ini = Initialization(self.dataset_name, self.product_name, wallet_distribution_type)

        seed_cost_dict = ini.constructSeedCostDict(self.seed_cost_option)
        graph_dict = ini.constructGraphDict(self.cascade_model)
        product_list = ini.constructProductList()[0]
        num_product = len(product_list)
        wallet_dict = ini.constructWalletDict()
        total_cost = sum(seed_cost_dict[k][i] for i in seed_cost_dict[0] for k in range(num_product))
        total_budget = round(total_cost / 2 ** bi, 4)

        eva = Evaluation(graph_dict, product_list, wallet_dict)
        print('@ evaluation @ ' + self.new_dataset_name + '_' + self.cascade_model + '_' + self.seed_cost_option +
              '\t' + self.model_name +
              '\t' + wallet_distribution_type + '_' + self.new_product_name + '_bi' + str(bi))
        sample_pnn_k = [0.0 for _ in range(num_product)]
        seed_diffusion_dict_k = {(k, s): 0 for k in range(num_product) for s in sample_seed_set[k]}

        for _ in range(self.eva_monte_carlo):
            pnn_k_list, seed_diffusion_dict = eva.getSeedSetProfit(sample_seed_set)
            sample_pnn_k = [(pnn_k + sample_pnn_k) for pnn_k, sample_pnn_k in zip(pnn_k_list, sample_pnn_k)]
            for seed_diffusion_flag in seed_diffusion_dict:
                seed_diffusion_dict_k[seed_diffusion_flag] += seed_diffusion_dict[seed_diffusion_flag]
        sample_pnn_k = [round(sample_pnn_k / self.eva_monte_carlo, 4) for sample_pnn_k in sample_pnn_k]
        sample_pro_k = [round(sample_pnn_k[k] * product_list[k][0], 4) for k in range(num_product)]
        sample_sn_k = [len(sample_sn_k) for sample_sn_k in sample_seed_set]
        sample_bud_k = [round(sum(seed_cost_dict[k][i] for i in sample_seed_set[k]), 4) for k in range(num_product)]
        sample_bud = round(sum(sample_bud_k), 4)
        sample_pro = round(sum(sample_pro_k), 4)
        seed_diffusion_list = [(seed_diffusion_flag, round(seed_diffusion_dict_k[seed_diffusion_flag] / self.eva_monte_carlo, 4)) for seed_diffusion_flag in seed_diffusion_dict_k]
        seed_diffusion_list = [(round(sd_item[1] * product_list[sd_item[0][0]][0], 4), sd_item[0], sd_item[1]) for sd_item in seed_diffusion_list]
        seed_diffusion_list = sorted(seed_diffusion_list, reverse=True)

        result = [sample_pro, sample_bud, sample_sn_k, sample_pnn_k, sample_pro_k, sample_bud_k, sample_seed_set]
        print('eva_time = ' + str(round(time.time() - eva_start_time, 2)) + 'sec')
        print(result)
        print('------------------------------------------')

        path0 = 'result/' + self.new_dataset_name + '_' + self.cascade_model + '_' + self.seed_cost_option
        if not os.path.isdir(path0):
            os.mkdir(path0)
        path = path0 + '/' + wallet_distribution_type + '_' + self.new_product_name + '_bi' + str(bi)
        if not os.path.isdir(path):
            os.mkdir(path)
        result_name = path + '/' + self.model_name + '.txt'

        fw = open(result_name, 'w')
        fw.write(self.new_dataset_name + '_' + self.cascade_model + '_' + self.seed_cost_option + '\t' +
                 self.model_name + '\t' +
                 wallet_distribution_type + '_' + self.new_product_name + '_bi' + str(bi) + '\n' +
                 'budget_limit = ' + str(total_budget) + '\n' +
                 'time = ' + str(ss_time) + '\n\n' +
                 'profit = ' + str(sample_pro) + '\n' +
                 'budget = ' + str(sample_bud) + '\n')
        fw.write('\nprofit_ratio = ')
        for kk in range(num_product):
            fw.write(str(sample_pro_k[kk]) + '\t')
        fw.write('\nbudget_ratio = ')
        for kk in range(num_product):
            fw.write(str(sample_bud_k[kk]) + '\t')
        fw.write('\nseed_number = ')
        for kk in range(num_product):
            fw.write(str(sample_sn_k[kk]) + '\t')
        fw.write('\ncustomer_number = ')
        for kk in range(num_product):
            fw.write(str(sample_pnn_k[kk]) + '\t')
        fw.write('\n\n')

        fw.write(str(sample_seed_set))
        for sd_item in seed_diffusion_list:
            fw.write('\n' + str(sd_item[1]) + '\t' + str(sd_item[0]) + '\t' + str(sd_item[2]))
        fw.close()
Example #24
parser.add_argument('-batch_size', default=128, type=int)
parser.add_argument('-img_height', default=32, type=int)
parser.add_argument('-img_width', default=32, type=int)
args = parser.parse_args()

os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

if args.data_domain == 'Source':
    reloadPath = '../checkpoint/baseline_s2t_test1/baseline_s2t_test1-47'
elif args.data_domain == 'Target':
    reloadPath = '../checkpoint/baseline_t2s_test2/baseline_t2s_test2-101'
else:
    reloadPath = ''

src_data, tar_data = init.loadData(data_domain=args.data_domain)
src_training, src_validation, src_test = src_data
tar_training, tar_test = tar_data

config = tf.ConfigProto()
config.gpu_options.allow_growth = True

with tf.Session(config=config) as sess:
    res_model = nocpb_dastage1_model(model_name=args.model_name,
                                     sess=sess,
                                     train_data=[src_training, tar_training],
                                     val_data=src_validation,
                                     tst_data=[src_test, tar_test],
                                     epoch=args.epoch,
                                     restore_epoch=args.restore_epoch,
                                     reloadPath=reloadPath,
Example #25
 def saveConfiguration(self):
     init.save2file('epoch : %d' % self.eps, self.ckptDir, self.model)
     init.save2file('restore epoch : %d' % self.res_eps, self.ckptDir,
                    self.model)
     init.save2file('model : %s' % self.model, self.ckptDir, self.model)
     init.save2file('learning rate : %g' % self.lr, self.ckptDir,
                    self.model)
     init.save2file('lambda : %g' % self.lbd, self.ckptDir, self.model)
     init.save2file('batch size : %d' % self.bs, self.ckptDir, self.model)
     init.save2file('image height : %d' % self.img_h, self.ckptDir,
                    self.model)
     init.save2file('image width : %d' % self.img_w, self.ckptDir,
                    self.model)
     init.save2file('num class : %d' % self.num_class, self.ckptDir,
                    self.model)
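saveConfiguration funnels every log line through init.save2file(log, ckptDir, model), which the training loops also use. A minimal sketch of such a logger, assuming it simply echoes the line and appends it to a per-model text file under the checkpoint directory (hypothetical):

import os

def save2file(log, ckptDir, model):
    # Hypothetical sketch: print a log line and append it to <ckptDir>/<model>.txt.
    print(log)
    if not os.path.isdir(ckptDir):
        os.makedirs(ckptDir)
    with open(os.path.join(ckptDir, model + '.txt'), 'a') as f:
        f.write(log + '\n')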
Example #26
def _main():

    # Open files and process reads dictionary
    f = open(sys.argv[1], 'r')
    reads_dict = init._process(f)

    # Get contigs from first consensus sequence
    contigs = cs.run_consensus(reads_dict)
    contig_file = open(sys.argv[2] + '/contig.txt', 'w+')
    ll_file = open(sys.argv[2] + '/likelihood.txt', 'w+')

    # Set initial parameters
    likelihood = 0
    likelihood_new = 0
    #likelihood_list = []

    for i in range(NUM_ITERS):
        '''FILE WRITES'''
        # Contigs file write data
        contig_file.write('%s\tstart\t' % (str(i)))
        for c in contigs:
            contig_file.write('%s\t' % (str(c)))
        contig_file.write('\n')
        contig_file.flush()
        # Likelihood file write data
        ll_file.write(
            '%s\t%s\t%s\n' %
            (str(i), str(likelihood), str(len(contigs)))), ll_file.flush()
        #likelihood_list.append(float(likelihood))
        # Reads file write data
        reads_file = open(sys.argv[2] + '/reads_trial_' + str(i) + '.txt', 'w')
        for r in reads_dict:
            for l in reads_dict[r]:
                reads_file.write(
                    str(l[3]) + ',' + str(l[0]) + ',' + str(l[1]) + str(',') +
                    str(l[3]) + '\n')
        reads_file.close()
        '''COMPUTATION OF ALGORITHM'''
        # Update likelihood
        likelihood = likelihood_new
        # Map reads
        reads_dict = rm.run(reads_dict, contigs)
        # Run Consensus Sequence
        contigs = cs.run_consensus(reads_dict)
        # Print data to file
        contig_file.write('%s\tmerge\t' % (str(i)))
        for c in contigs:
            contig_file.write('%s\t' % (str(c)))
        contig_file.write('\n')
        # Run merge
        contigs, reads_dict = mc.run_merge(
            contigs, reads_dict
        )  # how do we know if a merge has happened..do we need to know?
        # Get new likelihood
        likelihood_new = ll._likelihood(reads_dict, contigs)
    '''FILE WRITES'''
    # Reads file write data
    reads_file = open(sys.argv[2] + '/reads_trial_' + str(i + 1) + '.txt', 'w')
    for r in reads_dict:
        for l in reads_dict[r]:
            reads_file.write(
                str(l[3]) + ',' + str(l[0]) + ',' + str(l[1]) + str(',') +
                str(l[3]) + '\n')
    reads_file.close()
    # Print data to file
    for c in contigs:
        contig_file.write('1000\tend\t%s\n' % (str(c)))
    ll_file.write(
        '%s\t%s\t%s\n' %
        (str(NUM_ITERS), str(likelihood), str(len(contigs)))), ll_file.flush()
Example #27
from mpl_toolkits.basemap import Basemap
import Initialization as init
import matplotlib.pyplot as plt
import numpy as np


profiles = init.getOriProfiles()

hour1Diagram = np.array([]) 
hour2Diagram = np.array([]) 
hour3Diagram = np.array([])

latitude = np.array([])
longitude = np.array([])

for item in profiles :
	hour1 = profiles[item][0]
	hour2 = profiles[item][1]
	hour3 = profiles[item][2]

	if hour1 != 25 : hour1Diagram = np.append(hour1Diagram, hour1)
	if hour2 != 25 : hour2Diagram = np.append(hour2Diagram, hour2)
	if hour3 != 25 : hour3Diagram = np.append(hour3Diagram, hour3)

	latitude = np.append(latitude, profiles[item][3] )
	longitude = np.append(longitude, profiles[item][4] )
 
# make sure the value of resolution is a lowercase 'l' (for 'low'),
# not the numeral 1. Projection params: eck4, cyl

map = Basemap(projection='cyl', lat_0=0, lon_0=0,
Example #28

dataset_name = 'email' * (data_setting == 1) + 'dnc_email' * (data_setting == 2) + \
               'email_Eu_core' * (data_setting == 3) + 'NetHEPT' * (data_setting == 4)
new_dataset_name = 'email' * (data_setting == 1) + 'dnc' * (data_setting == 2) + \
                   'Eu' * (data_setting == 3) + 'Net' * (data_setting == 4)
seed_cost_option = 'dp' * (sc_option == 1) + 'd' * (sc_option == 2) + 'p' * (
    sc_option == 3)
cascade_model = 'ic' * (cm == 1) + 'wc' * (cm == 2)
product_name = 'item_lphc' * (prod_setting == 1) + 'item_hplc' * (prod_setting
                                                                  == 2)
new_product_name = 'lphc' * (prod_setting == 1) + 'hplc' * (prod_setting == 2)
wallet_distribution_type = 'm50e25' * (wd == 1) + 'm99e96' * (
    wd == 2) + 'm66e34' * (wd == 3)

ini = Initialization(dataset_name, product_name, wallet_distribution_type)
seed_cost_dict = ini.constructSeedCostDict(seed_cost_option)
wallet_dict = ini.constructWalletDict()
num_node = len(wallet_dict)
graph_dict = ini.constructGraphDict(cascade_model)
product_list, product_weight_list = ini.constructProductList()

ssmioa_model = SeedSelectionMIOA(graph_dict, seed_cost_dict, product_list,
                                 product_weight_list)
seed_mioa_dict = [{} for _ in range(num_product)]
mioa_dict = ssmioa_model.generateMIOA()
if epw_flag:
    mioa_dict = ssmioa_model.updateMIOAEPW(mioa_dict)
celf_heap = [(round((sum(mioa_dict[k][i][j][0]
                         for j in mioa_dict[k][i]) * product_list[k][0]) *
                    (1.0 if epw_flag else product_weight_list[k]), 4), k, i, 0)
Example #29
    veris_list_file = open(ProjectConfigFile.VERIS_LIST_FILE, 'r+')
    for line in veris_list_file:
        line = line.replace('\n', '').split(',')
        veris_list.append(
            [line[0], [float(line[1]),
                       float(line[2]),
                       float(line[3])]])


if __name__ == "__main__":
    # budget = 1497050 ###################### For 150 Assets ###############################
    # budget = 1002900 ########################## For 100 Assets ###############################
    # risk_elimination = .70
    # affordable_risk = 20069579
    ######################################### Read the threat and threat action statistics ###############################################
    Initialization.initializeEnvironment()
    # print "(Init) Threat Threat Action Asset Veris %s" % (threat_threatAction_asset_veris)
    # print "(Init) Asset List %s" % (asset_name_list)
    # print "(Init) Threat Threat Action Possible Pair %s" % (threat_threat_action_possible_pair)

    #################################################### Read The Assets ###########################
    readVerisList()
    # print "VERIS List %s" % (veris_list)
    # veris_list = [['database', [500000, 500000, 500000]], ['desktop', [100000, 100000,
    #                                                                    100000]]]  # ,['laptop',[100000,100000,100000]]]#,['end-user',[100000,100000,100000]]]
    experience_list = []
    experience_list.append([
        u'laptop_exp', [1222.0, 32345.0, 45678.0], {
            u'misuse': {
                u'net misuse': u'32'
            },
Example #30
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

if args.data_domain == 'Source':
    src_name = 'source'
    tar_name = 'target'
    reload_path = '../checkpoint/baseline_s2t/baseline_s2t-300'
elif args.data_domain == 'Target':
    src_name = 'target'
    tar_name = 'source'
    reload_path = '../checkpoint/baseline_t2s/baseline_t2s-14'
else:
    src_name = ''
    tar_name = ''
    reload_path = ''

src_training = init.loadPickle(utils.experimentalPath, src_name + '_training.pkl')
src_validation = init.loadPickle(utils.experimentalPath, src_name + '_validation.pkl')
src_test = init.loadPickle(utils.experimentalPath, src_name + '_test.pkl')

tar_training = init.loadPickle(utils.experimentalPath, tar_name + '_' + src_name + '.pkl')
tar_test = init.loadPickle(utils.experimentalPath, tar_name + '_test.pkl')

src_training = utils.normalizeInput(src_training, mode='Paired')
src_validation = utils.normalizeInput(src_validation, mode='Paired')
src_test = utils.normalizeInput(src_test, mode='Paired')

tar_training = utils.normalizeInput(tar_training, mode='Unpaired')
tar_test = utils.normalizeInput(tar_test, mode='Paired')

print('source training image shape', str(src_training[0].shape))
print('source training label shape', src_training[1].shape)
Example #31
    def train(self):
        self.sess.run(tf.global_variables_initializer())
        self.reloadSaver = tf.train.Saver(var_list=self.g_var_reload)
        self.reloadSaver.restore(self.sess, self.reloadPath)
        print(
            'Pre-trained classification model has been successfully reloaded !'
        )

        self.itr_epoch = len(self.source_training_data[0]) // self.bs
        self.total_iteration = self.eps * self.itr_epoch

        source_training_acc = 0.0
        g_loss = 0.0
        d_loss = 0.0

        for itr in range(1, self.total_iteration + 1):
            feed_dict_train, feed_dict_eval = self.getBatchData()
            _ = self.sess.run(self.d_trainOp, feed_dict=feed_dict_train)

            feed_dict_train, feed_dict_eval = self.getBatchData()
            _ = self.sess.run(self.g_trainOp, feed_dict=feed_dict_train)

            _training_accuracy, _g_loss, _d_loss = self.sess.run(
                [self.accuracy_source, self.g_loss, self.d_loss],
                feed_dict=feed_dict_eval)

            source_training_acc += _training_accuracy
            g_loss += _g_loss
            d_loss += _d_loss

            if itr % self.itr_epoch == 0:
                _current_eps = int(itr / self.itr_epoch)
                summary = self.sess.run(self.merged, feed_dict=feed_dict_eval)

                source_training_acc = float(source_training_acc /
                                            self.itr_epoch)
                g_loss = float(g_loss / self.itr_epoch)
                d_loss = float(d_loss / self.itr_epoch)

                log = "Epoch: [%d], Training Accuracy: [%.4f], G Loss: [%.4f], D Loss: [%.4f], Time: [%s]" % (
                    _current_eps, source_training_acc, g_loss, d_loss,
                    time.ctime(time.time()))

                init.save2file(log, self.ckptDir, self.model)

                self.writer.add_summary(summary, _current_eps)

                self.saver.save(
                    self.sess,
                    self.ckptDir + self.model + '-' + str(_current_eps))

                eval.test_procedure_DA(
                    self.source_test_data,
                    distribution_op=self.distribution_source,
                    inputX=self.x_source,
                    inputY=self.y_source,
                    mode='source',
                    num_class=self.num_class,
                    batch_size=self.bs,
                    session=self.sess,
                    is_training=self.is_training,
                    ckptDir=self.ckptDir,
                    model=self.model,
                    keep_rate=self.keep_rate)
                eval.test_procedure_DA(
                    self.target_test_data,
                    distribution_op=self.distribution_target,
                    inputX=self.x_target,
                    inputY=self.y_target,
                    mode='target',
                    num_class=self.num_class,
                    batch_size=self.bs,
                    session=self.sess,
                    is_training=self.is_training,
                    ckptDir=self.ckptDir,
                    model=self.model,
                    keep_rate=self.keep_rate)

                source_training_acc = 0.0
                g_loss = 0.0
                d_loss = 0.0