Example No. 1
def d_fold_cross_validation(examples, attributes, classification, d):
    '''Return the precision rate estimated with d-fold cross-validation.'''
    rightNumber = 0
    for i in range(d):
        # hold out fold i as the test set
        testset = examples[i]

        # train on the remaining d - 1 folds
        trainset = []
        for j in range(d):
            if j == i:
                continue
            trainset.append(examples[j])

        tree = decision_tree_learning(trainset, attributes, [], classification)
        print(visualize(tree))
        expected = get_expected_values(trainset, attributes, classification)
        tree = prune(tree, trainset, attributes, classification, expected)
        print("after pruning")
        print(visualize(tree))
        if isRight(tree, testset):
            rightNumber += 1

    precision_rate = rightNumber / float(d)
    return precision_rate
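The snippet assumes an isRight helper that scores the held-out fold. A minimal sketch (hypothetical; classify(tree, example) is likewise assumed from the same codebase, and each example is assumed to carry its class label in the last position):

def isRight(tree, testset):
    # Hypothetical helper: a fold counts as "right" when a majority of
    # its examples are classified correctly by the pruned tree.
    correct = sum(1 for example in testset
                  if classify(tree, example) == example[-1])
    return correct > len(testset) / 2.0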
Example No. 2
def checkLabels(tensor_labels):
    [_, width, height, _] = tensor_labels.shape
    out = np.zeros((width, height))
    for i in range(width):
        for j in range(height):
            out[i, j] = np.nonzero(tensor_labels[0, i, j, :])[0][0]

    visualize(out, True, '../Util/debug.png')
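The nested loop takes the index of the first non-zero entry along the channel axis. For one-hot label tensors this is exactly an argmax, so the whole loop collapses to one vectorized call (a sketch assuming one-hot labels):

import numpy as np

def checkLabels_vectorized(tensor_labels):
    # With one-hot labels, the first non-zero channel index equals the argmax.
    out = np.argmax(tensor_labels[0], axis=-1)
    visualize(out, True, '../Util/debug.png')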
Example No. 3
    def test_visualize_with_empty_list(self) -> None:  # pylint: disable=R0201
        """ Test train_model() with no data.
        """
        # Given
        files: List[str] = []

        # When
        visualize(files)
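The test has no explicit "# Then" step: it passes as long as visualize() does not raise on an empty list. A framework-agnostic variant that states this expectation explicitly (a sketch; List is assumed imported from typing, as in the original test module):

def test_visualize_with_empty_list_explicit() -> None:
    """Variant that makes the no-exception expectation explicit."""
    # Given
    files: List[str] = []
    # When / Then: visualize() must not raise on empty input
    try:
        visualize(files)
    except Exception as exc:
        raise AssertionError("visualize([]) raised {!r}".format(exc))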
Example No. 4
def ConvertColourSpace(input_image, colourspace):
    '''
    Converts an RGB image into a specified color space, visualizes the
    color channels and returns the image in its new color space.

    Colorspace options:
      opponent
      rgb -> for normalized RGB
      hsv
      ycbcr
      gray

    P.S: Do not forget the visualization part!
    '''

    # Convert the image into double precision for conversions
    input_image = input_image.astype(np.float32)

    if colourspace.lower() == 'opponent':
        # fill in the rgb2opponent function
        input_image *= 1./255
        new_image = rgbConversions.rgb2opponent(input_image)

    elif colourspace.lower() == 'rgb':
        # fill in the rgb2normedrgb function
        input_image *= 1./255
        new_image = rgbConversions.rgb2normedrgb(input_image)

    elif colourspace.lower() == 'hsv':
        # use built-in function from opencv
        #input_image *= 1./255
        new_image = cv2.cvtColor(input_image, cv2.COLOR_RGB2HSV)

    elif colourspace.lower() == 'ycbcr':
        # use built-in function from opencv
        input_image *= 1./255
        new_image = cv2.cvtColor(input_image, cv2.COLOR_RGB2YCrCb)
        tmp = np.zeros(new_image.shape)
        # reorder channels from OpenCV's YCrCb to YCbCr
        tmp[:,:,0] = new_image[:,:,0]
        tmp[:,:,1] = new_image[:,:,2]
        tmp[:,:,2] = new_image[:,:,1]
        new_image = tmp

    elif colourspace.lower() == 'gray':
        # fill in the rgb2grays function
        new_image = rgbConversions.rgb2grays(input_image)

    else:
        print('Error: Unknown colorspace type [%s]...' % colourspace)
        new_image = input_image

    visualize(new_image)

    return new_image
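The rgbConversions helpers are not shown. For the opponent branch, one common definition is the Gevers-Smeulders opponent colour space; a sketch (an assumption, the assignment's rgb2opponent may use different scaling):

import numpy as np

def rgb2opponent_sketch(img):
    # Opponent colour space from an RGB image in [0, 1].
    R, G, B = img[..., 0], img[..., 1], img[..., 2]
    O1 = (R - G) / np.sqrt(2)          # red-green channel
    O2 = (R + G - 2 * B) / np.sqrt(6)  # yellow-blue channel
    O3 = (R + G + B) / np.sqrt(3)      # intensity channel
    return np.stack([O1, O2, O3], axis=-1)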
Example No. 5
def ConvertColourSpace(input_image, colourspace):
    '''
    Converts an RGB image into a specified color space, visualizes the
    color channels and returns the image in its new color space.

    Colorspace options:
      opponent
      rgb -> for normalized RGB
      hsv
      ycbcr
      gray

    P.S: Do not forget the visualization part!
    '''

    # Convert the image into double precision for conversions
    input_image = input_image.astype(np.float32)

    if colourspace.lower() == 'opponent':
        # fill in the rgb2opponent function
        new_image = rgbConversions.rgb2opponent(input_image)

    elif colourspace.lower() == 'rgb':
        # fill in the rgb2normedrgb function
        new_image = rgbConversions.rgb2normedrgb(input_image)

    elif colourspace.lower() == 'hsv':
        new_image = cv2.cvtColor(input_image, cv2.COLOR_RGB2HSV)

    elif colourspace.lower() == 'ycbcr':
        new_image = cv2.cvtColor(input_image, cv2.COLOR_RGB2YCrCb)
        # OpenCV returns YCrCb; swap the chroma channels to obtain YCbCr
        Cr = np.array(new_image[:, :, 1])
        Cb = np.array(new_image[:, :, 2])
        new_image[:, :, 1] = Cb
        new_image[:, :, 2] = Cr

    elif colourspace.lower() == 'gray':
        # fill in the rgb2grays function
        new_image = rgbConversions.rgb2grays(input_image)

    else:
        print('Error: Unknown colorspace type [%s]...' % colourspace)
        new_image = input_image

    visualize(new_image)

    return new_image
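For the normalized-RGB branch, the standard definition divides each channel by the per-pixel intensity sum; a sketch (an assumption, rgbConversions.rgb2normedrgb may handle the zero case differently):

import numpy as np

def rgb2normedrgb_sketch(img):
    # Normalized RGB: r = R/(R+G+B), g = G/(R+G+B), b = B/(R+G+B).
    # A small epsilon avoids division by zero on black pixels.
    s = img.sum(axis=-1, keepdims=True) + 1e-8
    return img / s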
Example No. 6
def ConvertColourSpace(input_image, colourspace):
    '''
    Converts an RGB image into a specified color space, visualizes the
    color channels and returns the image in its new color space.

    Colorspace options:
      opponent
      rgb -> for normalized RGB
      hsv
      ycbcr
      gray

    P.S: Do not forget the visualization part!
    '''

    # Convert the image into double precision for conversions
    input_image = input_image.astype(np.float32)

    if colourspace.lower() == 'opponent':
        # fill in the rgb2opponent function
        new_image = rgbConversions.rgb2opponent(input_image)

    elif colourspace.lower() == 'rgb':
        # fill in the rgb2normedrgb function
        new_image = rgbConversions.rgb2normedrgb(input_image)

    elif colourspace.lower() == 'hsv':
        # use built-in function from opencv (assumed fill-in: the original
        # snippet left this branch as `pass`, leaving new_image undefined)
        new_image = cv2.cvtColor(input_image, cv2.COLOR_RGB2HSV)

    elif colourspace.lower() == 'ycbcr':
        # use built-in function from opencv (assumed fill-in, as above;
        # note OpenCV returns YCrCb channel order)
        new_image = cv2.cvtColor(input_image, cv2.COLOR_RGB2YCrCb)

    elif colourspace.lower() == 'gray':
        # fill in the rgb2grays function
        new_image = rgbConversions.rgb2grays(input_image)

    else:
        print('Error: Unknown colorspace type [%s]...' % colourspace)
        new_image = input_image

    visualize(new_image)

    return new_image
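Finally, the gray branch. A hedged sketch of rgb2grays using the common luminosity weighting (the assignment's helper may return several gray variants, e.g. lightness and average as well):

import numpy as np

def rgb2grays_sketch(img):
    # Luminosity method with ITU-R BT.601 weights.
    return 0.299 * img[..., 0] + 0.587 * img[..., 1] + 0.114 * img[..., 2]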
Example No. 7
def main():
    train_id, train_feature, train_label = read_dataset(
        os.path.join('..', 'dataset', 'Titanic', 'train.csv'))
    test_id, test_feature, _ = read_dataset(
        os.path.join('..', 'dataset', 'Titanic', 'test.csv'))
    n_train_samples = np.shape(train_id)[0]
    n_test_samples = np.shape(test_id)[0]

    all_feature = np.concatenate((train_feature, test_feature), axis=0)

    # cluster_model = sklearn.cluster.KMeans(n_clusters=2).fit(all_feature)
    # cluster_labels = cluster_model.labels_

    cluster_labels = sklearn.cluster.AgglomerativeClustering(
        n_clusters=2).fit_predict(all_feature)

    cluster_labels = np.reshape(cluster_labels, (np.size(cluster_labels), 1))
    cluster_labels_train, cluster_labels_test = cluster_labels[:n_train_samples], cluster_labels[
        n_train_samples:]

    # plt.subplot(2, 2, 1)
    # visualize((train_id, train_feature, train_label), visualize_dim, 'train data')
    plt.subplot(1, 2, 1)
    visualize((train_id, train_feature, cluster_labels_train), visualize_dim,
              'train cluster')

    test_labels = np.ones(shape=(len(test_id), 1), dtype=np.int32)
    # plt.subplot(2, 2, 3)
    # visualize((test_id, test_feature, test_labels), visualize_dim, 'test data', color='blue')
    plt.subplot(1, 2, 2)
    visualize((test_id, test_feature, cluster_labels_test), visualize_dim,
              'test cluster')

    plt.show()

    with open('submission_cluster.csv', 'w') as f_csv:
        f_csv.write("PassengerId,Survived" + '\n')
        for i in range(n_test_samples):
            result = cluster_labels_test[i][0]
            line = '{},{}\n'.format(test_id[i][0], result)
            f_csv.write(line)
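One caveat the script leaves implicit: cluster IDs are arbitrary, so label "1" in the submission does not necessarily mean "survived". A hedged sketch of a remapping step (hypothetical helper; train_label is the 0/1 column the script already reads) that assigns each cluster its majority training label:

import numpy as np

def align_clusters_to_labels(cluster_train, y_train, cluster_test):
    # Map each arbitrary cluster ID to the majority Survived value
    # observed for that cluster on the training split.
    mapping = {}
    for c in np.unique(cluster_train):
        mask = cluster_train.ravel() == c
        mapping[c] = int(round(float(y_train.ravel()[mask].mean())))
    remapped = np.array([mapping[c] for c in cluster_test.ravel()])
    return remapped.reshape(cluster_test.shape)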
Example No. 8
def apriori(data, support, confidence):
    '''
    Prints:
    - Frequent ItemSets
    - Support
    - Confidence
    '''
    transactions, C = getTransactions(data)
    L = []

    while (len(C) > 0):
        newL = getItemsOverSupportThreshold(C, transactions, support)
        if (len(newL) < 1):
            break
        else:
            L = newL
            C = getJoin(L)

    #print(f"Frequent ItemSets: {L}")
    rules = []
    for item in L:
        rules += getRules(item)

    assocRules = []

    for rule in rules:
        conf = getConfidence(rule, transactions)
        if (conf > confidence):
            assocRules.append([set(rule[0]), set(rule[1]), conf])
            #print(f"{set(rule[0])} --> {set(rule[1])} || Confidence: {conf}")

    #print(assocRules)
    print(
        tabulate(assocRules,
                 headers=['Antecedents', 'Consequents', 'Confidence']))
    # freqLookup and supportLookup are assumed to be module-level caches
    # populated by the helper functions above.
    visualize(freqLookup)
    plot3d(freqLookup, supportLookup)
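The helpers are not shown; the standard definitions are support(X) = count(X)/N and confidence(A -> B) = support(A u B) / support(A). A hedged sketch of getConfidence, assuming a rule is an (antecedent, consequent) pair of item collections as the loop above suggests:

def getConfidence_sketch(rule, transactions):
    # confidence(A -> B) = support(A | B) / support(A)
    antecedent, consequent = set(rule[0]), set(rule[1])
    union = antecedent | consequent
    n_ant = sum(1 for t in transactions if antecedent <= set(t))
    n_union = sum(1 for t in transactions if union <= set(t))
    return n_union / n_ant if n_ant else 0.0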
Example No. 9
    def train(self, epochs: int, log_frequency: int, val_frequency: int):
        self.model.train()
        for epoch in range(epochs):
            self.model.train()
            for batch, labels in self.train_loader:
                batch = batch.to(self.device)
                labels = labels.to(self.device)
                # forward pass and loss
                preds = self.model.forward(batch)
                loss = self.criterion(preds, labels)
                loss.backward()
                # gradients from loss.backward() applied by the optimizer
                self.optimizer.step()
                self.optimizer.zero_grad()
                # loss logged according to log frequency
                if ((self.step + 1) % log_frequency) == 0:
                    self.summary_writer.add_scalars("Loss", {"Train": loss},
                                                    self.step)

                self.step += 1
            # evaluation performed according to val frequency
            if ((epoch + 1) % val_frequency) == 0:
                self.evaluate(epoch)
                self.model.train()
            # learning rate linearly reduced (assumes self.learning_rates
            # holds at least epochs + 1 entries)
            for group in self.optimizer.param_groups:
                group['lr'] = self.learning_rates[epoch + 1]
        # convolutional filters visualized
        filters = self.model.conv1.weight.data.clone()
        grid = utils.make_grid(filters, nrow=8, normalize=True, padding=1)
        plt.figure(figsize=(8, filters.shape[0] // 8 + 1))
        plt.axis('off')
        plt.imshow(grid.cpu().numpy().transpose((1, 2, 0)))
        plt.savefig("outputs/filters.png", bbox_inches='tight', pad_inches=0)
        # predicted saliency maps visualized
        visualize("preds.pkl", "vals.pkl", "outputs")
Example No. 10
def makeplot():
    if request.method == "POST":
        feat = [
            request.form.get("features2", None),
            request.form.get("features1", None),
        ]
        cl = request.form.get("classifier", None)
        # NOTE: eval() on user-supplied form input is unsafe; see the
        # whitelist sketch after this example
        clf = ft.fit(feat, eval(cl), "diabetes.csv")
        fig = visualize(feat, clf, "diabetes.csv")
        canvas = FigureCanvas(fig)
        output = io.BytesIO()
        canvas.print_png(output)
        response = make_response(output.getvalue())
        response.mimetype = "image/png"
        return response
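eval(cl) executes arbitrary text taken from the HTTP form, so any request can run code on the server. A safer pattern resolves the name through a whitelist; a sketch (the particular classifier set below is an assumption, not the app's actual option list):

from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier

ALLOWED_CLASSIFIERS = {
    "LogisticRegression": LogisticRegression,
    "DecisionTreeClassifier": DecisionTreeClassifier,
}

def resolve_classifier(name):
    # Look the user-supplied name up in a fixed table instead of eval()ing it.
    if name not in ALLOWED_CLASSIFIERS:
        raise ValueError("unknown classifier: {!r}".format(name))
    return ALLOWED_CLASSIFIERS[name]()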
Example No. 11
        for b in box:
            result_txt_line += str(b) + ' '
            # print(result_txt_line)
        result_txt_line += str(word)
        result_txt_list += result_txt_line + '\n'
    with open(output_path, 'w') as out:
        out.write(result_txt_list)
        print('output_path: ', output_path, '----ok----')


if __name__ == '__main__':
    image = cv2.imread(img_path)
    img_name = img_path.split('/')[-1]
    output_txt_path = os.path.join(OUTPUT_TXT, img_name.replace('jpg', 'txt'))

    preds, boxes_list, t = model_pannet.predict(image)
    detect_pannet = pannet_json(boxes_list)
    print(detect_pannet)
    # http://service.mmlab.uit.edu.vn/receipt/task1/predict
    # http://service.aiclub.cs.uit.edu.vn/gpu150/pannet/predict
    # detect_pannet = requests.post('http://service.aiclub.cs.uit.edu.vn/gpu150/pannet/predict', files={"file": (
    #     "filename", open(img_path, "rb"), "image/jpeg")}).json()

    words_list, prob_list = load_images_to_predict(detect_pannet, image)
    detect_vietocr = vietocr_json(words_list, prob_list)
    print(detect_vietocr)
    # output_txt(detect_pannet, detect_vietocr, output_txt_path)

    input_folder_img = 'input_img_test'
    visualize(image, img_path, detect_pannet, detect_vietocr)
    # result_extract_info_txt = extract_info(input_folder_img, OUTPUT_TXT, OUTPUT_PATH_POST, 'json_visualize_path')
Example No. 12
def train(dataGenerator, config_file):
    learning_rate = float(FLAGS.learning_rate)
    batch_norm_decay = 0.99
    num_epochs = int(FLAGS.num_epochs)
    max_out = int(FLAGS.img_output_amount)
    # Input sizes
    image_size_x = config_file['H']
    image_size_y = config_file['W']
    # Output sizes
    Hout = config_file['HOut']
    Wout = config_file['WOut']
    # Batch size
    batch_size = config_file['batchSize']
    total_batches = dataGenerator.batchAm
    # 20% of the batches are reserved for validation
    number_of_batches = int(FLAGS.num_batches)
    # Number of val batches needs to be even, because every uneven batch corresponds to the next images compared to the previous (even) batch
    number_of_val_batches = 10
    trainbool = FLAGS.train

    # Conv filter size & kernel size
    patch_size1 = 7
    patch_size2 = 5
    patch_size3 = 3
    depth1 = 64
    depth2 = 128
    depth3 = 256
    depth4 = 512
    depth5 = 1024
    depthout = 2
    upsize1 = [114,152]
    upsize2 = [228,304]

    logdir = '%sgraph' % FLAGS.dir
    savedir = '%ssaver/test' % FLAGS.dir
    checkpoint = '%ssaver/' % FLAGS.dir

    if not os.path.exists(checkpoint):
        os.makedirs(checkpoint)

    graph = tf.Graph()
    with graph.as_default():        
        input1 = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3), name='input1')
        input2 = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3), name='input2')
        phase = tf.placeholder(tf.bool, name='phase')

        global_step = tf.Variable(0, name='global_step', trainable=False)

        def model(input1, input2):
            input_concat = tf.concat([input1, input2],3)
            shape_input = input_concat.get_shape().as_list()
            print('shape_input :', shape_input)

            with tf.variable_scope('convolution_layers'):


                conv1 = tf.contrib.layers.conv2d(input_concat, num_outputs=depth1, kernel_size=patch_size1, stride=2, padding='SAME', scope='conv1')
                batch_norm_1 = tf.contrib.layers.batch_norm(conv1, decay=batch_norm_decay, is_training=phase, scope='bn1')
                batch_norm_1 = conv1  # NOTE: overwrites the batch-norm output, effectively disabling BN (this pattern repeats below)
                relu1 = tf.nn.relu(batch_norm_1, 'relu1')
                shape1 = batch_norm_1.get_shape().as_list()
                print('shape1 :', shape1)

                conv2 = tf.contrib.layers.conv2d(relu1, num_outputs=depth2, kernel_size=patch_size2, stride=2, padding='SAME', scope='conv2')
                batch_norm_2 = tf.contrib.layers.batch_norm(conv2, decay=batch_norm_decay, is_training=phase, scope='bn2')
                batch_norm_2 = conv2
                relu2 = tf.nn.relu(batch_norm_2, 'relu2')
                shape2 = batch_norm_2.get_shape().as_list()
                print('shape2 :', shape2)

                conv3 = tf.contrib.layers.conv2d(relu2, num_outputs=depth3, kernel_size=patch_size3, stride=2, padding='SAME', scope='conv3')
                batch_norm_3 = tf.contrib.layers.batch_norm(conv3, decay=batch_norm_decay, is_training=phase, scope='bn3')
                batch_norm_3 = conv3
                relu3 = tf.nn.relu(batch_norm_3, 'relu3')
                shape3 = batch_norm_3.get_shape().as_list()
                print('shape3 :', shape3)

                conv4 = tf.contrib.layers.conv2d(relu3, num_outputs=depth3, kernel_size=patch_size3, stride=1, padding='SAME', scope='conv4')
                batch_norm_4 = tf.contrib.layers.batch_norm(conv4, decay=batch_norm_decay, is_training=phase, scope='bn4')
                batch_norm_4 = conv4
                relu4 = tf.nn.relu(batch_norm_4, 'relu4')
                shape4 = batch_norm_4.get_shape().as_list()
                print('shape4 :', shape4)

                conv5 = tf.contrib.layers.conv2d(relu4, num_outputs=depth4, kernel_size=patch_size3, stride=2, padding='SAME', scope='conv5')
                batch_norm_5 = tf.contrib.layers.batch_norm(conv5, decay=batch_norm_decay, is_training=phase, scope='bn5')
                batch_norm_5 = conv5
                relu5 = tf.nn.relu(batch_norm_5, 'relu5')
                shape5 = batch_norm_5.get_shape().as_list()
                print('shape5 :', shape5)

                conv6 = tf.contrib.layers.conv2d(relu5, num_outputs=depth4, kernel_size=patch_size3, stride=1, padding='SAME', scope='conv6')
                batch_norm_6 = tf.contrib.layers.batch_norm(conv6, decay=batch_norm_decay, is_training=phase, scope='bn6')
                batch_norm_6 = conv6
                relu6 = tf.nn.relu(batch_norm_6, 'relu6')
                shape6 = batch_norm_6.get_shape().as_list()
                print('shape6 :', shape6)

                conv7 = tf.contrib.layers.conv2d(relu6, num_outputs=depth5, kernel_size=patch_size3, stride=2, padding='SAME', scope='conv7')
                batch_norm_7 = tf.contrib.layers.batch_norm(conv7, decay=batch_norm_decay, is_training=phase, scope='bn7')
                batch_norm_7 = conv7
                relu7 = tf.nn.relu(batch_norm_7, 'relu7')
                shape7 = batch_norm_7.get_shape().as_list()
                print('shape7 :', shape7)

                
            with tf.variable_scope('deconvolution_layers'):

                predict_flow7 = tf.contrib.layers.conv2d(relu7, num_outputs=2, kernel_size=patch_size3, stride=1, padding='SAME', scope='conv_pf7')
                upsample_pf7 = tf.contrib.layers.conv2d_transpose(predict_flow7, num_outputs=2, kernel_size=4, stride=2, padding='SAME')
                deconv7_6 = tf.contrib.layers.conv2d_transpose(relu7, num_outputs=512, kernel_size=4, stride=2, padding='SAME')

                upsample_pf7 = tf.image.resize_images(upsample_pf7, [14,14])
                # deconv7_6 = tf.image.resize_images(deconv7_6, [13,18])

                shape8 = predict_flow7.get_shape().as_list()
                print('predict_flow7 :', shape8)
                shape9 = upsample_pf7.get_shape().as_list()
                print('upsample_pf7 :', shape9)
                shape10 = deconv7_6.get_shape().as_list()
                print('deconv7_6 :', shape10)


                concat1 = tf.concat([relu6, deconv7_6, upsample_pf7], 3)
                shape11 = concat1.get_shape().as_list()
                print('concat1 :', shape11)


                predict_flow6 = tf.contrib.layers.conv2d(concat1, num_outputs=2, kernel_size=patch_size3, stride=1, padding='SAME', scope='conv_pf6')
                upsample_pf6 = tf.contrib.layers.conv2d_transpose(predict_flow6, num_outputs=2, kernel_size=4, stride=2, padding='SAME')
                deconv6_4 = tf.contrib.layers.conv2d_transpose(concat1, num_outputs=256, kernel_size=4, stride=2, padding='SAME')

                upsample_pf6 = tf.image.resize_images(upsample_pf6, [28,28])
                # deconv6_4 = tf.image.resize_images(deconv6_4, [28,37])

                shape12 = predict_flow6.get_shape().as_list()
                print('predict_flow6 :', shape12)
                shape13 = upsample_pf6.get_shape().as_list()
                print('upsample_pf6 :', shape13)
                shape14 = deconv6_4.get_shape().as_list()
                print('deconv6_4 :', shape14)

                concat2 = tf.concat([relu4, deconv6_4, upsample_pf6], 3)
                shape15 = concat2.get_shape().as_list()
                print('concat2 :', shape15)



                predict_flow4 = tf.contrib.layers.conv2d(concat2, num_outputs=2, kernel_size=patch_size3, stride=1, padding='SAME', scope='conv_pf4')
                upsample_pf4 = tf.contrib.layers.conv2d_transpose(predict_flow4, num_outputs=2, kernel_size=4, stride=2, padding='SAME')
                deconv4_2 = tf.contrib.layers.conv2d_transpose(concat2, num_outputs=128, kernel_size=4, stride=2, padding='SAME')

                upsample_pf4 = tf.image.resize_images(upsample_pf4, [56,56])
                # deconv4_2 = tf.image.resize_images(deconv4_2, [57,76])

                shape16 = predict_flow4.get_shape().as_list()
                print('predict_flow4 :', shape16)
                shape17 = upsample_pf4.get_shape().as_list()
                print('upsample_pf4 :', shape17)
                shape18 = deconv4_2.get_shape().as_list()
                print('deconv4_2 :', shape18)

                concat3 = tf.concat([relu2, deconv4_2, upsample_pf4], 3)
                shape19 = concat3.get_shape().as_list()
                print('concat3 :', shape19)

                predict_flow = tf.contrib.layers.conv2d(concat3, num_outputs=2, kernel_size=patch_size3, stride=1, padding='SAME', scope='conv_pf')
                output = tf.image.resize_images(predict_flow, [224,224])
                shape20 = output.get_shape().as_list()
                print('output :', shape20)

            return output

        # Training
        optic = model(input1, input2)
        # visual_flow = tf.py_func(visualize,[optic], tf.float32)
        # pdb.set_trace()
        # visualoptic = visualize(optic,batch_size,image_size_x,image_size_y)

        prediction_input2 = bilinear_sampler_1d_h(input1, optic)
        difference = tf.pow(tf.abs(prediction_input2 - input2),2)
        difference_reduced = tf.reduce_sum(difference) / (Hout*Wout*batch_size)
        loss = difference_reduced
        # loss = tf.reduce_sum(difference_reduced * optic[:,:,:,0] + difference_reduced * optic[:,:,:,1]) + tf.reduce_sum(difference)
        
        # Optimizer
        # Ensure that we execute the update_ops before performing the train step
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            optimizer = tf.train.AdadeltaOptimizer(learning_rate) #.minimize(loss, global_step=global_step)
            # apply_gradient_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
        grads = optimizer.compute_gradients(loss)
        apply_gradient_op = optimizer.apply_gradients(grads, global_step=global_step)

        # summary
        loss_sum = tf.summary.scalar('loss', loss)
        loss_valid_sum = tf.summary.scalar('validation_loss', loss)

        img_sum_1 = tf.summary.image('img2_target', input2, max_outputs = max_out)
        img_sum_2 = tf.summary.image('img2_prediction', prediction_input2, max_outputs = max_out)
        img_sum_3 = tf.summary.image('difference', difference, max_outputs = max_out)

        img_sum_1v = tf.summary.image('v_img2_target', input2, max_outputs = max_out)
        img_sum_2v = tf.summary.image('v_img2_prediction', prediction_input2, max_outputs = max_out)
        img_sum_3v = tf.summary.image('v_difference', difference, max_outputs = max_out)

        # img_sum_4 = tf.summary.image('visual_optic_flow', visual_flow, max_outputs = max_out)

        # img_sum_4 = tf.summary.image('visualize', visual_flow, max_outputs = max_out)
        # img_pred_sum = tf.summary.image('img2_prediction', prediction_input2, max_outputs = 3)

        # merge summaries into single operation
        summary_op_1 = tf.summary.merge([loss_sum])
        summary_op_2 = tf.summary.merge([loss_valid_sum])
        summary_op_3 = tf.summary.merge([img_sum_1, img_sum_2, img_sum_3]) #img_pred_sum  ,img_sum_4
        summary_op_4 = tf.summary.merge([img_sum_1v, img_sum_2v, img_sum_3v]) #img_pred_sum  ,img_sum_4
        # summary_hist = tf.summary.histogram("weights1",kernel)

    gpu_options = tf.GPUOptions(allow_growth=True)
    tf_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_options)
    with tf.Session(graph=graph, config=tf_config) as session:
        # Create log writer
        writer = tf.summary.FileWriter(logdir, session.graph)
        # Create a saver
        saver = tf.train.Saver(max_to_keep=3)

        # Check for existing checkpoints
        ckpt = tf.train.latest_checkpoint(checkpoint)

        if ckpt is None:
            # Initialize Variables
            tf.global_variables_initializer().run()
            print('no checkpoint found')
            num_epochs_start = 0
        else:
            saver.restore(session, ckpt)
            print('checkpoint restored')
            num_epochs_start = int(session.run(global_step) / number_of_batches)

        number_of_batches_start = int(session.run(global_step)) % number_of_batches


        print('Initialized')

        if trainbool == False:
            num_epochs_start = 0
            num_epochs = 1
            number_of_batches_start = 0

        # gl_vars = [v.name for v in tf.global_variables()]
        # for k in gl_vars:
        #     print "Variable: ", k

        # for key in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES): print(key) 
        # trainable_vars = [v.name for v in tf.trainable_variables()]
        # for k in trainable_vars:
        #     print "Variable: ", k


        # _________________________________________________________________________________________________
        # _________________________________________________________________________________________________
        # ONLY 2 IMAGES
        # _________________________________________________________________________________________________
        # _________________________________________________________________________________________________
        # rgb1 = np.ones((batch_size, image_size_x, image_size_y, 3))
        # f = open('/users/start2012/r0298867/Thesis/Templates/data/image_01/0000000000.png', 'rb')
        # # resize images to 228*304
        # pilIM = im.open(f)
        # new_height = 228
        # wpercent = new_height / float(pilIM.size[1])
        # new_width = int((float(pilIM.size[0]) * float(wpercent)))
        # pilIM = pilIM.resize((new_width, new_height))
        # pilIM = pilIM.crop((0,0,304,228))
        # pilIm2 = pilIM.copy()  # PIL bug workaround
        # f.close()
        # rgb1[i, :, :, :] = np.asarray(pilIM)
        # pilIM.close()

        # rgb2 = np.ones((batch_size, image_size_x, image_size_y, 3))
        # f = open('/users/start2012/r0298867/Thesis/Templates/data/image_01/0000000001.png', 'rb')
        # # resize images to 228*304
        # pilIM = im.open(f)
        # new_height = 228
        # wpercent = new_height / float(pilIM.size[1])
        # new_width = int((float(pilIM.size[0]) * float(wpercent)))
        # pilIM = pilIM.resize((new_width, new_height))
        # pilIM = pilIM.crop((0,0,304,228))
        # pilIm2 = pilIM.copy()  # PIL bug workaround
        # f.close()
        # rgb2[i, :, :, :] = np.asarray(pilIM)
        # pilIM.close()

        # rgb1 = (rgb1 - 127.5) / 127.5
        # rgb2 = (rgb2 - 127.5) / 127.5
        # batch_data1 = rgb1
        # batch_data2 = rgb2
        # print 'test if values of rgb are between -1 and 1' # OK, tested,  min -1, max 1, mean -0.11
        # pdb.set_trace()
        # _________________________________________________________________________________________________
        # _________________________________________________________________________________________________
        

        for epoch in range(num_epochs_start,num_epochs): 
            print "EPOCH: %d" % epoch

            
            if FLAGS.kitti == False:
                range_var = range(number_of_batches_start,number_of_batches,2)
            else:
                range_var = range(number_of_batches_start,number_of_batches)
                    
            for step in range_var:
                print "we are at step: %d" % step

                # NYU_optic hdf5 dataset --------
                # Load 2 batches, only rgb images, depth is not set up in this hdf5 file
                # Batch 1 contains first image, batch 2 contains second image

                # Get training data
                if FLAGS.kitti == False:
                    in_rgb1 =  dataGenerator.__getitem__(step + number_of_val_batches)
                    in_rgb1 = (in_rgb1 - 127.5) / 127.5
                    batch_data1 = in_rgb1

                    in_rgb2 =  dataGenerator.__getitem__(step + 1 + number_of_val_batches)
                    in_rgb2 = (in_rgb2 - 127.5) / 127.5
                    batch_data2 = in_rgb2
                else:
                    in_rgb1, in_rgb2 =  dataGenerator.__getitem__(step + number_of_val_batches)
                    in_rgb1 = (in_rgb1 - 127.5) / 127.5
                    in_rgb2 = (in_rgb2 - 127.5) / 127.5
                    batch_data1 = in_rgb1
                    batch_data2 = in_rgb2

                # # Test to see if images from batch 1 and 2 correspond with each other
                # img1 = np.array(in_rgb1[0,:,:,:])
                # img2 = np.array(in_rgb2[0,:,:,:])
                # plt.figure(1)
                # plt.imshow((img1))
                # plt.figure(2)
                # plt.imshow((img2))
                # plt.show()
                # pdb.set_trace()

                # Get validation batch
                if step % 5 == 0:
                    # NYU Optic ----------
                    if FLAGS.kitti == False:
                        random_val_batch_number = random.randint(1,number_of_val_batches-2)
                        if random_val_batch_number % 2 != 0:
                            random_val_batch_number = random_val_batch_number + 1
                        val_rgb1 = dataGenerator.__getitem__(random_val_batch_number)
                        val_rgb1 = (val_rgb1 - 127.5) / 127.5
                        val_rgb2 = dataGenerator.__getitem__(random_val_batch_number+1)
                        val_rgb2 = (val_rgb2 - 127.5) / 127.5
                    # KITTY Dataset -------------
                    else:
                        random_val_batch_number = random.randint(1,number_of_val_batches-1)
                        val_rgb1, val_rgb2 = dataGenerator.__getitem__(random_val_batch_number)
                        val_rgb1 = (val_rgb1 - 127.5) / 127.5
                        val_rgb2 = (val_rgb2 - 127.5) / 127.5

                # Training = false -> Load in model and test on some data, nothing should train
                if trainbool == False:
                    # Need to load test set in here. For now its just test on trainingsdata.
                    [testdatagenerator, config_file] =  testdata.main()
                    rgb_1, rgb_2, gt = testdatagenerator.__getitem__(step)
                    rgb_1 = (rgb_1 - 127.5) / 127.5
                    rgb_2 = (rgb_2 - 127.5) / 127.5
                    gt = gt / 512

                    l, opticfl, summary, summary_imgs  = session.run([loss, optic, summary_op_1,summary_op_3], 
                        feed_dict={input1: rgb_1, input2: rgb_2, phase: 0})
                    writer.add_summary(summary, global_step=session.run(global_step))
                    writer.add_summary(summary_imgs, global_step=session.run(global_step))
                    writer.flush()
                    print('loss: %f ' % (l))
                    visualize(opticfl, batch_size, 224, 224, filename='/users/start2012/r0298867/Thesis/implementation1/build_new/Optic/visualization_test.bmp')

                    print('rmserror: ' + str(np.sqrt(np.power((np.sum(opticfl) - np.sum(gt[:,:,:,0:1])), 2) / (224*224*10))))
                    print('average endpoint error: ' + str(np.sum(np.sqrt(np.power(opticfl[:,:,:,0] - gt[:,:,:,0], 2) + np.power(opticfl[:,:,:,1] - gt[:,:,:,1], 2))) / (224*224*10)))


                else:
                    # Normal training
                    _, l, opticfl, summary = session.run([apply_gradient_op, loss, optic, summary_op_1], #optimizer
                        feed_dict={input1: batch_data1, input2: batch_data2, phase: 1})
                    writer.add_summary(summary, global_step=session.run(global_step))
                    print('loss: %f, step: %d, epoch: %d' % (l, step, epoch))
                    print('min opticflow: %f, max opticflow: %f' % (np.min(opticfl), np.max(opticfl)))
                    print('mean: %f' % (np.mean(opticfl)))
                    # Save validation loss every 5 steps 
                    if step % 5 == 0:
                        val_loss, valid_opticfl, summary = session.run([loss, optic, summary_op_2], 
                            feed_dict={input1: val_rgb1, input2: val_rgb2, phase: 0})
                        writer.add_summary(summary, global_step=session.run(global_step))
                        writer.flush()
                        print('validation loss: %f, step: %d' % (val_loss, step))

                    # If step is multiple of 100 also save an image to summary and save session
                    if step % 100 == 0:
                        #Training images (Opticflow has size [1,228,304,2])
                        opticfl, summary = session.run([optic, summary_op_3], 
                            feed_dict={input1: batch_data1, input2: batch_data2, phase: 0})
                        writer.add_summary(summary, global_step=session.run(global_step))
                        writer.flush()
                        visualize(opticfl,batch_size,224,224)
                        print('mean: ', np.mean(opticfl))
                        print('var: ', np.var(opticfl))
                        # Why do I track mean and variance again? -> To see if values are close to each other (var) and not too big (mean)
                        # Technically I want a value between 0 and 1, with 1 being the a pixel moving the length of the picture

                        #Validation images
                        valid_opticfl, summary = session.run([optic, summary_op_4], 
                            feed_dict={input1: val_rgb1, input2: val_rgb2, phase: 0})
                        writer.add_summary(summary, global_step=session.run(global_step))
                        writer.flush()

                        # Save session every 100 steps
                        saver.save(session, savedir, global_step=session.run(global_step))
                        print "Model saved"

            number_of_batches_start = 0
            print('number_of_batches_start reset to 0')
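The objective above is a purely photometric reconstruction loss: input1 is warped by the predicted flow via bilinear_sampler_1d_h and compared pixelwise with input2,

% Photometric reconstruction loss encoded by `difference_reduced` above:
\mathcal{L} \;=\; \frac{1}{H_{\text{out}}\, W_{\text{out}}\, B} \sum_{x,y} \bigl|\hat{I}_2(x,y) - I_2(x,y)\bigr|^{2}

where \hat{I}_2 is the warped first image and B the batch size; no ground-truth flow enters training (the gt arrays are only used for the test-time error prints).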
Example No. 13
    usData.scatterPlotData(),
    caData.scatterPlotData(),
    deData.scatterPlotData(),
    frData.scatterPlotData(),
    gbData.scatterPlotData()
]  #organizes data for scatter plots

pieData = [
    usData.category_analysis(),
    deData.category_analysis(),
    caData.category_analysis(),
    frData.category_analysis(),
    gbData.category_analysis()
]  #analyzes data for use in pie chart

bar = visualize()  #creates bar graphs
bar.bargraph(barData)

pie = visualize()  #creates pie charts
pie.piechart(pieData)

scatter = visualize()  #creates scatter plots
scatter.scatterplot(scatterData)

#testing purposes
'''
print usChannel
print "\n"
print caChannel
print "\n"
print deChannel
Example No. 14
def mtsp(nodes, edges, method):
    G = nx.DiGraph()
    fullGraph = graphMatrix(nodes, edges)
    nCity = int(input("Enter number of destination cities "))
    destCities = []
    print("enter the destination cities' ID ")
    i = 0
    while (i < nCity):
        dest = int(input())
        if (dest in destCities):
            print("City's already inputted")
        else:
            destCities.append(dest)
            i += 1

    startCity = int(input("Enter logistic office location "))
    while (startCity in destCities):
        print("City's already inputted")
        startCity = int(input("Enter logistic office location "))

    G.add_node(startCity, pos=(nodes.get(startCity).x, nodes.get(startCity).y))
    destCities.sort()

    nCourier = int(input("Enter number of courier available "))

    # City clustering with improved K-Means Algorithm
    # STEP 1 : Set the capacity of each cluster
    Q = math.ceil((nCity) / nCourier)

    # STEP 2 : Calculate the distance of each point to the cluster centre
    centroid = {}
    i = 0
    selected = []
    while (len(centroid) != nCourier):
        randCentroid = random.choice(destCities)
        if (not (randCentroid in selected)):
            selected.append(randCentroid)
            centroid[i + 1] = copy.deepcopy(nodes.get(randCentroid))
            i += 1

    countSame = 0
    while (countSame != nCourier):
        # STEP 3: Assign each point to a cluster based on its Euclidean distance to the cluster's centroid
        cityToCentroid = []
        for dest in destCities:
            for cent in centroid:
                distance = eucledian(nodes.get(dest), centroid.get(cent))
                cityToCentroid.append([distance, cent, dest])

        cityToCentroid.sort(key=lambda x: x[0])

        cityMapInClusters = {}
        for i in range(nCourier):
            cityMapInClusters[i + 1] = []

        assigned = {}
        for i in destCities:
            assigned[i] = False

        count = 0
        while (count != nCity):
            for item in (cityToCentroid):
                if (len(cityMapInClusters.get(item[1])) < Q
                        and assigned.get(item[2]) == False):
                    cityMapInClusters.get(item[1]).append(int(item[2]))
                    assigned[item[2]] = True
                    count += 1

        # STEP 4 : Update the coordinates of each centroid
        countSame = 0
        for cent in centroid:
            sumX = 0
            sumY = 0
            for i in range(len(cityMapInClusters.get(cent))):
                sumX += round(nodes.get(cityMapInClusters.get(cent)[i]).x, 3)
                sumY += round(nodes.get(cityMapInClusters.get(cent)[i]).y, 3)

            xCent = round((1 / len(cityMapInClusters.get(cent))) * sumX, 0)
            yCent = round((1 / len(cityMapInClusters.get(cent))) * sumY, 0)

            if (xCent == round(centroid.get(cent).x, 0)
                    and yCent == round(centroid.get(cent).y, 0)):
                countSame += 1

            centroid.get(cent).x = xCent
            centroid.get(cent).y = yCent

    destCities.append(startCity)
    subGraph = subGraphMatrix(fullGraph, destCities)
    print("The full subgraph: ")
    printMatrix(subGraph, destCities)
    print()
    # Searching solution for TSP of each cluster
    for tour in range(nCourier):
        n = len(cityMapInClusters.get(tour + 1))
        dest = cityMapInClusters.get(tour + 1)
        dest.sort()
        dest.append(startCity)
        places = cityMapInClusters.get(tour + 1)
        V = set(range(len(cityMapInClusters.get(tour + 1))))
        c = subGraphMatrix(fullGraph, dest)

        print("=========================================================")
        print("Tour", str(tour + 1), ":")
        print("Subgraph Matrix:")
        printMatrix(c, dest)
        print()

        # use the original BnB algorithm
        if (method == '1'):
            finalResult, finalPath = TSP(c, len(dest))

            print('route with total distance found: ', finalResult)
            print(finalPath)
            print(startCity, end="")
            subdest = []
            for i in range(1, len(finalPath)):
                print(" ->", places[finalPath[i]], end="")
                subdest.append(places[finalPath[i]])

        # TSP optimization using mip
        else:
            model = Model()
            x = [[model.add_var(var_type=BINARY) for j in V] for i in V]
            y = [model.add_var() for i in V]

            # objective function: minimize the distance
            model.objective = minimize(
                xsum(c[i][j] * x[i][j] for i in V for j in V))

            # constraint : leave each point only once
            for i in V:
                model += xsum(x[i][j] for j in V - {i}) == 1

            # constraint : enter each point only once
            for i in V:
                model += xsum(x[j][i] for j in V - {i}) == 1

            # subtour elimination
            for (i, j) in product(V - {n}, V - {n}):
                if i != j:
                    model += y[i] - (n + 1) * x[i][j] >= y[j] - n

            # optimizing
            status = model.optimize(max_seconds=30)
            print(status)

            print("=========================================================")
            print("Tour", str(tour + 1), ":")
            print("Subgraph Matrix:")
            printMatrix(c, dest)
            print("")

            # checking if a solution was found
            if model.num_solutions:
                print('route with total distance found: ',
                      model.objective_value)
                print(startCity, end="")
                nc = n
                subdest = []
                while True:
                    nc = [i for i in V if x[nc][i].x >= 0.99][0]
                    print(" ->", places[nc], end="")
                    subdest.append(places[nc])
                    if nc == n:
                        break
            else:
                print(model.objective_bound)

        print("")
        print("")

        # visualize the graph
        visualize(G, startCity, subdest, nodes, tour)

    plt.show()
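The constraint family model += y[i] - (n + 1) * x[i][j] >= y[j] - n is the Miller-Tucker-Zemlin (MTZ) subtour elimination, as in python-mip's TSP example. Rearranged,

% MTZ subtour elimination as encoded in the model above:
y_j \;\le\; y_i - 1 + (n + 1)\,(1 - x_{ij}) \qquad \forall\, i \ne j,\;\; i, j \ne n,

so whenever x_{ij} = 1 it forces y_j \le y_i - 1: the auxiliary y variables strictly order the visited cities, and no cycle can close without passing through the depot index n.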
Example No. 15
            x = x + 1
    return (x)


def is_valid_file(parser, arg):
    """ Check if file can be opened (legit path?) """
    arg = os.path.abspath(arg)
    if not os.path.exists(arg):
        parser.error("The file %s does not exist!" % arg)
    else:
        return arg


def delete_single_dir(args):
    if args.input_folder is not None:
        pass
    elif args.input_repo is not None:
        os.system("echo")
        current = os.getcwd()
        # WARNING: recursively deletes the current working directory
        os.system("rm -rf " + current)
    else:
        pass


if __name__ == "__main__":
    args = get_arguments()
    data = gather_data(args)
    print_to_CLI(data, args)
    visualize(data)
    delete_single_dir(args)
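os.system("rm -rf " + os.getcwd()) is easy to trigger from the wrong directory. A safer sketch of the cleanup, deleting an explicitly named clone directory with shutil instead of shelling out:

import os
import shutil

def delete_repo_dir(path):
    # Remove a specific cloned-repository directory, never the CWD implicitly.
    path = os.path.abspath(path)
    if os.path.isdir(path):
        shutil.rmtree(path)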
Example No. 16
title = 'Cyst Phantom'

# Aspect ratio is height/width.
# The factor of 1/2 is the ratio to convert from units of time to
# depth because travel time is double the distance in the measured
# echo roundtrip.
# ratio = (c/fs/2) / (pitch/no_sub_x)
ratio = (c / fs / 2) / (pitch)

# Scale back for decimation
ratio *= decimation

fig, axes = visualize(cropped,
                      title,
                      figsize=(18, 9),
                      min_dB=-30,
                      aspect_ratio=ratio,
                      show=False)

# Set tick marks with real distances
ticks, _ = plt.xticks()
ticks = [x for x in ticks[:-1] if x >= 0]
labels = [x * pitch + x_range[0] for x in ticks]
labels = ['{:.1f}'.format(1000 * l) for l in labels]
plt.xticks(ticks, labels)
plt.xlabel('Width [mm]', fontsize=14, labelpad=15)

ticks, _ = plt.yticks()
ticks = [t for t in ticks[:-1] if t >= 0]
labels = [decimation * t / (2 * fs) * c + z_range[0] for t in ticks]
labels = ['{:.1f}'.format(1000 * l) for l in labels]
plt.yticks(ticks, labels)  # assumed continuation, mirroring the x-axis relabeling above
Example No. 17
from visualize import *

if __name__ == '__main__':
    run = visualize()
Example No. 18
"""
전체 프로세스를 보여주는 모듈입니다.
"""
from make_labels_true import *
from extract_features import *
from make_labels_pred import *
from evaluation import *
from visualize import *

if __name__ == '__main__':
    if os.path.exists(IMG_DIR):
        # make true labels by analysing image filename
        make_labels_true()

        # extract image features using MobileNet V2
        extract_features()

        # make cluster using K-Means algorithm
        make_labels_pred()

        # evaluate clustering result by adjusted Rand index
        evaluation()

        # visualize clustering using t-SNE
        visualize()
    else:
        print("Image dir not found.")
Example No. 19
import visualize

from utils import load_case

from visualize import visualize

volume, segmentation = load_case("case_00001")

visualize(
    "case_00001",
    "C:/Users/nsuic/Ashfia Miss Research/Ashfia Miss Research/vis_dest/case_00002"
)
Example No. 20
def main():
    num_reps = 10
    minibatch_samples = 50
    train_samples = 1000
    test_samples = 100
    num_epochs = 3000
    target_noise_sigma = 0.0

    use_cuda = False

    stats_list = []
    csv_header = [
        "Rep",
        "Dropout-NLL",
        "Dropout-MSE",
        #    "Ensemble-NLL", "Ensemble-MSE",
        #    "Sigma-NLL", "Sigma-MSE",
        #    "HydraNet-NLL", "HydraNet-MSE",
        #    "HydraNet-Sigma-NLL", "HydraNet-Sigma-MSE",
    ]

    for rep in range(num_reps):

        print('Performing repetition {}/{}...'.format(rep + 1, num_reps))

        (x_train, y_train) = gen_train_data(train_samples)
        (x_test, y_test) = gen_test_data(test_samples)
        exp_data = ExperimentalData(x_train, y_train, x_test, y_test)

        #        visualize_data_only(x_train, y_train, x_test, y_test, filename='data.pdf')
        #return

        print('Starting dropout training')
        start = time.time()
        #        (dropout_model, _) = train_nn_dropout(x_train, y_train, num_epochs=num_epochs, use_cuda=use_cuda)
        (dropout_model, _) = train_nn_dropout(x_train,
                                              y_train,
                                              minibatch_samples,
                                              num_epochs=num_epochs,
                                              use_cuda=use_cuda)
        (y_pred_dropout,
         sigma_pred_dropout) = test_nn_dropout(x_test,
                                               dropout_model,
                                               use_cuda=use_cuda)
        nll_dropout = compute_nll(y_test, y_pred_dropout, sigma_pred_dropout)
        mse_dropout = compute_mse(y_test, y_pred_dropout)
        print('Dropout, NLL: {:.3f} | MSE: {:.3f}'.format(
            nll_dropout, mse_dropout))
        visualize(x_train, y_train, x_test, y_test, y_pred_dropout,
                  sigma_pred_dropout, nll_dropout, mse_dropout, rep,
                  'dropout_{}.png'.format(rep))
        end = time.time()
        print('Completed in {:.3f} seconds.'.format(end - start))
        #
        #
        #        print('Starting ensemble training')
        #
        #        start = time.time()
        #        ensemble_models = train_nn_ensemble_bootstrap(x_train, y_train, minibatch_samples, num_epochs=num_epochs, use_cuda=use_cuda, target_noise_sigma=target_noise_sigma)
        #        (y_pred_bs, sigma_pred_bs) = test_nn_ensemble_bootstrap(x_test, ensemble_models, use_cuda=use_cuda)
        #        nll_bs = compute_nll(y_test, y_pred_bs, sigma_pred_bs)
        #        mse_bs = compute_mse(y_test, y_pred_bs)
        #        print('Ensemble BS, NLL: {:.3f} | MSE: {:.3f}'.format(nll_bs, mse_bs))
        ##        visualize(x_train, y_train, x_test, y_test, y_pred_bs, sigma_pred_bs, nll_bs, mse_bs, rep,'ensemble_{}.png'.format(rep))
        #        end = time.time()
        #        print('Completed in {:.3f} seconds.'.format(end - start))

        #        print('Starting sigma training')
        #
        #        start = time.time()
        #        (sigma_model, _) = train_nn_sigma(exp_data, minibatch_samples, num_epochs=num_epochs, use_cuda=use_cuda)
        #        (y_pred_sigma, sigma_pred_sigma) = test_nn_sigma(x_test, sigma_model, use_cuda=use_cuda)
        #        nll_sigma = compute_nll(y_test, y_pred_sigma, sigma_pred_sigma)
        #        mse_sigma = compute_mse(y_test, y_pred_sigma)
        #        print('Sigma, NLL: {:.3f} | MSE: {:.3f}'.format(nll_sigma, mse_sigma))
        ##        visualize(x_train, y_train, x_test, y_test, y_pred_sigma, sigma_pred_sigma, nll_sigma, mse_sigma, rep,'sigma_{}.png'.format(rep))
        #        end = time.time()
        #        print('Completed in {:.3f} seconds.'.format(end - start))

        #        print('Starting hydranet training with target_noise_sigma={}'.format(target_noise_sigma))
        #        #
        #        start = time.time()
        #        (hydranet_model, _) = train_hydranet(exp_data, minibatch_samples, num_heads=10, num_epochs=num_epochs, use_cuda=use_cuda, target_noise_sigma=target_noise_sigma)
        #        (y_pred_hydranet, sigma_pred_hydranet) = test_hydranet(x_test, hydranet_model, use_cuda=use_cuda)
        #        nll_hydranet = compute_nll(y_test, y_pred_hydranet, sigma_pred_hydranet)
        #        mse_hydranet = compute_mse(y_test, y_pred_hydranet)
        #        print('HydraNet, NLL: {:.3f} | MSE: {:.3f}'.format(nll_hydranet, mse_hydranet))
        ##        visualize(x_train, y_train, x_test, y_test, y_pred_hydranet, sigma_pred_hydranet, nll_hydranet, mse_hydranet, rep,'hydranet_{}.png'.format(rep))
        #        end = time.time()
        #        print('Completed in {:.3f} seconds.'.format(end - start))

        #        print('Starting hydranet-sigma training')
        #        #
        #        start = time.time()
        #        (hydranetsigma_model, _) = train_hydranet_sigma(exp_data, minibatch_samples, num_heads=10, num_epochs=num_epochs, use_cuda=use_cuda, target_noise_sigma=target_noise_sigma)
        #        (y_pred_hydranetsigma, sigma_pred_hydranetsigma) = test_hydranet_sigma(x_test, hydranetsigma_model, use_cuda=use_cuda)
        #        nll_hydranetsigma = compute_nll(y_test, y_pred_hydranetsigma, sigma_pred_hydranetsigma)
        #        mse_hydranetsigma = compute_mse(y_test, y_pred_hydranetsigma)
        #        print('HydraNet-Sigma, NLL: {:.3f} | MSE: {:.3f}'.format(nll_hydranetsigma, mse_hydranetsigma))
        ##        visualize(x_train, y_train, x_test, y_test, y_pred_hydranetsigma, sigma_pred_hydranetsigma, nll_hydranetsigma, mse_hydranetsigma, rep,'hydranetsigma_{}.png'.format(rep))
        #        end = time.time()
        #        print('Completed in {:.3f} seconds.'.format(end - start))

        #        stats_list.append([rep, nll_dropout, mse_dropout])

        #        stats_list.append([rep, nll_dropout, mse_dropout, nll_bs, mse_bs, nll_sigma, mse_sigma, nll_hydranet, mse_hydranet, nll_hydranetsigma, mse_hydranetsigma])

        output_mat = {
            'x_train': x_train,
            'x_test': x_test,
            'y_train': y_train,
            'y_test': y_test,
            'y_pred_dropout': y_pred_dropout,
            #                      'y_pred_sigma': y_pred_sigma,
            #                      'y_pred_bs': y_pred_bs,
            #                      'y_pred_hydranet': y_pred_hydranet,
            #                      'y_pred_hydranetsigma': y_pred_hydranetsigma,
            'sigma_pred_dropout': sigma_pred_dropout,
            #                      'sigma_pred_sigma': sigma_pred_sigma,
            #                      'sigma_pred_bs': sigma_pred_bs,
            #                      'sigma_pred_hydranet': sigma_pred_hydranet,
            #                      'sigma_pred_hydranetsigma': sigma_pred_hydranetsigma,
            'nll_dropout': nll_dropout,
            #                      'nll_sigma': nll_sigma,
            #                      'nll_bs': nll_bs,
            #                      'nll_hydranet': nll_hydranet,
            #                      'nll_hydranetsigma': nll_hydranetsigma,
            'mse_dropout': mse_dropout,
            #                      'mse_sigma': mse_sigma,
            #                      'mse_bs': nll_bs,
            #                      'mse_hydranet': nll_hydranet,
            #                      'mse_hydranetsigma': nll_hydranetsigma,
            'rep': rep,
            'sigma_n': target_noise_sigma,
        }
        torch.save(
            output_mat, 'figs/dropout_experiment_run_{}_noise_{}.pt'.format(
                rep, target_noise_sigma))
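compute_nll and compute_mse are not shown; under a Gaussian predictive distribution the usual definitions are as follows (a sketch; the project's own helpers may differ in constants):

import numpy as np

def compute_nll(y_true, y_pred, sigma_pred):
    # Mean Gaussian negative log-likelihood over the test points.
    var = np.square(sigma_pred)
    return np.mean(0.5 * np.log(2.0 * np.pi * var)
                   + np.square(y_true - y_pred) / (2.0 * var))

def compute_mse(y_true, y_pred):
    # Mean squared error.
    return np.mean(np.square(y_true - y_pred))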
Example No. 21
from readarff import *
from tree import *
from visualize import *
from decisiontree import *
from importance import *
from utils import argmax
from math import log
from prune import *

examples, attributes, classes = read_arff('restaurant-domain.arff')

tsttree = decision_tree_learning(examples, attributes, [], classes)
print('before pruning')
visualize(tsttree)
expected = get_expected_values(examples, attributes, classes)
tsttree = prune(tsttree, examples, attributes, classes, expected)
print()
print()
print('after pruning')
visualize(tsttree)