Example #1
    def gene_entropy(self, gene, log=False):
        """Return the Shannon entropy of a gene's expression profile."""
        gene_i = self.name_or_index(gene)

        # Expression values for this gene across all samples.
        gexpr = self.expr[gene_i, :]
        if log:
            gexpr = [math.log(e + 1) for e in gexpr]
        # Normalize to a probability distribution before computing entropy.
        gexpr = stats.normalize(gexpr)

        return stats.entropy(gexpr)
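The stats.normalize and stats.entropy helpers are not shown in this snippet; a minimal sketch, assuming they rescale values into a probability distribution and take its Shannon entropy:

import math

def normalize(values):
    # Rescale non-negative values so they sum to 1.
    total = sum(values)
    return [v / total for v in values]

def entropy(probs, base=2.0):
    # Shannon entropy; zero-probability entries contribute nothing.
    return -sum(p * math.log(p, base) for p in probs if p > 0)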
Example #2
def run_experiment(model, test_set, average=False, phase=False):
    results = []

    for rhythms in test_set:
        if phase:
            crossH = []
            syncop = []

            # Score the model at every possible phase offset.
            for offset in range(rhythms[0].resolution()):
                for rhythm in rhythms:
                    rhythm.phase = offset % rhythm.resolution()

                # The cross entropy is the negative log likelihood;
                # negate it to get the log likelihood.
                crossH.append(-per_item_cross_entropy(model, rhythms, average=average))
                syncop.append(-sum(sum(r.syncopation) for r in rhythms))

            # Advance the phase once more, wrapping at the resolution.
            for rhythm in rhythms:
                rhythm.phase = (rhythm.phase + 1) % rhythm.resolution()

            phase_distribution = softmax(crossH)
            results.append(entropy(phase_distribution))

        else:
            observations = []
            for rhythm in rhythms:
                observations += model.observations(rhythm)

            cross_entropy = sum(model.cross_entropy(*obs) for obs in observations)
            if average:
                cross_entropy /= len(observations)

            results.append(cross_entropy)

    return results
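softmax is likewise an assumed helper here; a minimal sketch of how it turns the per-phase log likelihoods into a phase distribution, with the usual max-shift for numerical stability:

import math

def softmax(scores):
    # Shift by the maximum score for numerical stability, then normalize.
    m = max(scores)
    exps = [math.exp(s - m) for s in scores]
    total = sum(exps)
    return [e / total for e in exps]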
Example #3
    def test_entropy(self):
        self.assertEqual(2., entropy({1: 5, 2: 4, 3: 1}))
        self.assertEqual(1., entropy({1: 5, 2: 5}))
        self.assertEqual(1., entropy({1: 5, 2: 5}, False))
        self.assertEqual(0., entropy({1: 5}, False))
Example #4
def main(algorithm,
         optimizer,
         dataset,
         num_classes=10,
         optim_params=None):

    # Avoid a mutable default argument for the optimizer settings.
    if optim_params is None:
        optim_params = {'lr': 0.05, 'weight_decay': 5e-4, 'momentum': 0.9}

    filename = algorithm + '_' + optimizer + '_' + dataset

    # prepare dataset
    logger.info("====== Evaluation ======")
    logger.info("Preparing dataset...{}".format(dataset))
    db = utils.Datasets(dataset)
    train, valid, test = db.split_image_data(train_data=db.train,
                                             test_data=db.test)

    # prepare model
    model, optimizer = utils.prepare_model(algorithm, optimizer, filename,
                                           optim_params, device, num_classes)

    # get model's output
    data_size = test.dataset.data.shape[0]
    targets = test.dataset.targets if dataset == "CIFAR10" else test.dataset.labels
    predictions = torch.zeros(data_size, num_classes)
    labels = torch.zeros(data_size, 1)
    logger.info("data: {} - targets {}.".format(data_size, len(targets)))
    cum_loss = 0.0
    correct = 0.0
    n_samples = 0.0
    model.eval()
    with torch.no_grad():
        for idx, (data, target) in enumerate(test):
            start = idx * data.size(0)
            end = (idx + 1) * data.size(0)
            data, target = data.to(device), target.to(device)
            output = model(data)
            output = F.log_softmax(output, dim=1)
            # Sum up the batch loss (only for batches that contain the
            # highest class label).
            if target.max() == 9:
                cum_loss += F.nll_loss(output, target, reduction='sum').item()
            # get the index of the max log-probability
            _, predicted_labels = output.max(dim=1, keepdim=True)
            correct += (predicted_labels.view(-1) == target).sum().item()
            n_samples += len(output)
            predictions[start:end] = output
            labels[start:end] = predicted_labels
    predictions = predictions.cpu().numpy()
    labels = labels.view(-1).cpu().numpy()
    epoch_loss = cum_loss / n_samples  # average over all test samples
    epoch_acc = correct / n_samples
    logger.info("Loss = {}, Accuracy = {}, test set!".format(
        epoch_loss, epoch_acc))
    logger.info("Computing entropy on... test")
    stats.entropy(predictions, targets, filename + '_ENTROPY')
    # save model's outputs and targets valid data used in training
    logger.info("Computing calibration on... test")
    # compute and save reliability stats
    calibration = stats.calibration_curve(filename + '_ENTROPY')
    utils.save_nparray(filename + '_CALIBRATION', **calibration)
    logger.info("====== Evaluation End ======\n\n")
Example #5
				### angular offset between max of the posterior and injection
				cosDtheta = stats.cos_dtheta(est_theta, est_phi, inj_theta, inj_phi)

				### searched area
				p_value = stats.p_value(posterior, inj_theta, inj_phi, nside=nside)
				searched_area = stats.searched_area(posterior, inj_theta, inj_phi, degrees=True)

				### num_mode
				num_mode = stats.num_modes(posterior, inj_theta, inj_phi, nside=nside)

				### min{cosDtheta}
				min_cosDtheta = stats.min_cos_dtheta(posterior, inj_theta, inj_phi, nside=nside)

				### entropy
				entropy = stats.entropy(posterior)

				### information
				info = stats.information(posterior)

				summary = ("cos(ang_offset) = %.6f\nsearched_area = %.6f deg2\np_value = %.6f\n"
				           "num_mode = %d\nmin{cos(ang_offset)} = %.6f\nentropy = %.6f\n"
				           "information = %.6f") % (cosDtheta, searched_area, p_value,
				                                    num_mode, min_cosDtheta, entropy, info)
				with open(statsfilename, "w") as statsfile:
					print(summary, file=statsfile)

				if opts.verbose:
					print("\t\t" + statsfilename)
					print("\n".join("\t\t" + line for line in summary.split("\n")))
					if opts.time:
						print("\t\t%s sec" % (time.time() - to))

		#===============================================
		# generate scatter plot
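stats.entropy(posterior) and stats.information(posterior) come from this project's own stats module; a minimal sketch of plausible definitions, assuming posterior is a normalized HEALPix pixel probability map (names and base are assumptions):

import numpy as np

def entropy(posterior, base=2.0):
    # Shannon entropy of a normalized pixel probability map.
    p = posterior[posterior > 0]
    return -np.sum(p * np.log(p)) / np.log(base)

def information(posterior, base=2.0):
    # Information gained relative to a uniform map: log(Npix) - H.
    return np.log(len(posterior)) / np.log(base) - entropy(posterior, base=base)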
Example #6
    if opts.searched_area:
        if opts.Verbose:
            print "        searched_area"
        sa = stats.searched_area(post,
                                 stheta,
                                 sphi,
                                 nside=nside,
                                 degrees=opts.degrees)
        messages.append("searched_area(%s) = %.3f %s" %
                        (opts.searched_area, sa, areaunit))

    # entropy -> size: 2**H (base-2 entropy) is an effective number of
    # pixels, so pixarea * 2**H gives an effective sky area
    if opts.entropy:
        if opts.Verbose:
            print "        entropy"
        entropy = pixarea * 2**(stats.entropy(post, base=2.0))
        messages.append("entropy = %.3f %s" % (entropy, areaunit))

    # CR -> size, max(dtheta)
    if opts.Verbose:
        print "        Credible Regions"
    cr = {}
    for CR, conf in zip(stats.credible_region(post, opts.credible_interval),
                        opts.credible_interval):
        if opts.Verbose:
            print "        CR : %.6f" % (conf)
        header = "%.3f %s CR" % (conf * 100, "%")
        size = pixarea * len(CR)
        messages.append("%s: size = %.3f %s" % (header, size, areaunit))

        if not opts.no_credible_interval_dtheta: