Example #1
0
File: model.py  Project: yfyh2013/combo
 def __init__(self, lik, mean, cov, inf='exact'):
     """Assemble the model from a likelihood term and a Gaussian prior.

     The joint parameter vector is the likelihood parameters followed by
     the prior parameters; cached statistics start out empty.
     """
     self.lik = lik
     self.prior = prior(mean=mean, cov=cov)  # project-level prior factory
     self.inf = inf
     # Joint parameter bookkeeping (likelihood params first).
     self.params = self.cat_params(self.lik.params, self.prior.params)
     self.num_params = self.lik.num_params + self.prior.num_params
     self.stats = ()
示例#2
0
    def __call__(self, x, pks=None, verbose=False, r=None):
        """Evaluate prior and likelihood at ``x``.

        Returns ``(likelihood_value, prior_value)``; if the prior is
        ``-inf`` the likelihood is short-circuited to ``-inf`` as well.

        ``pks`` is an optional dict collecting diagnostics ('prior',
        'likelihood', plus whatever prior()/the likelihood write into it).
        ``r`` defaults to ``self.base_r``.
        """
        # Bug fix: the original used a mutable default (pks={}), which is
        # shared across calls and leaked diagnostics between invocations.
        if pks is None:
            pks = {}
        if r is None:
            r = self.base_r
        prior_value = prior(x,
                            p=self.p,
                            use_skewed_distr=self.use_skewed_distr,
                            pks=pks,
                            use_uniform_prior=self.use_uniform_prior,
                            unadmixed_populations=self.unadmixed_populations,
                            r=r)
        if prior_value == -float('inf'):
            return -float('inf'), prior_value

        likelihood_value = self.lik(x,
                                    self.emp_cov,
                                    self.b,
                                    self.M,
                                    nodes=self.nodes,
                                    pks=pks)
        if verbose:
            # Converted from Python 2 print statements for consistency with
            # the rest of the file, which uses print() calls.
            print('empirical_matrix=', self.emp_cov)
            print('input_matrix=', pks['covariance'] + x[1])
        pks['prior'] = prior_value
        pks['likelihood'] = likelihood_value
        return likelihood_value, prior_value
Example #3
0
File: model.py  Project: tsudalab/combo
 def __init__(self, lik, mean, cov, inf="exact"):
     """Set up the likelihood, the Gaussian prior, and the concatenated
     parameter vector (likelihood parameters first)."""
     self.lik = lik
     self.inf = inf
     self.prior = prior(mean=mean, cov=cov)
     self.num_params = self.lik.num_params + self.prior.num_params
     self.params = self.cat_params(self.lik.params, self.prior.params)
     # No cached statistics yet.
     self.stats = ()
示例#4
0
 def posterior(x, pks=None):
     """Evaluate the (prior-only) posterior at x.

     Returns just ``prior_value`` when it is -inf, otherwise the tuple
     ``(0, prior_value)`` — the likelihood term is fixed at 0 here.
     NOTE(review): the two branches return different shapes (scalar vs
     tuple); preserved as-is since callers may rely on it.
     """
     # Bug fix: the original used a mutable default (pks={}), shared
     # across calls, so diagnostics leaked between invocations.
     if pks is None:
         pks = {}
     prior_value = prior(x, p=p, pks=pks)
     if prior_value == -float('inf'):
         return prior_value
     pks['prior'] = prior_value
     pks['likelihood'] = 0
     return 0, prior_value
示例#5
0
    def __call__(self, params, dtype=np.double):
        """Return prior(params) + likelihood(params) for the 13 unpacked
        epidemic-model parameters."""
        # Unpack the parameter vector into its named components.
        (q, beta, k, c1, c2, c3, deq, deqq, diq,
         delta, gamma, E0, I0) = params

        prior_term = prior.prior(q, beta, k, c1, c2, c3, deq, deqq, diq,
                                 delta, gamma, E0, I0)
        lik_term = likelihood.likelihood(q, beta, k, c1, c2, c3, deq, deqq,
                                         diq, delta, gamma, E0, I0)
        return prior_term + lik_term
示例#6
0
 def posterior(x, pks=None):
     """Return (likelihood_value, prior_value) for state x.

     If the prior is -inf, the likelihood is skipped and -inf is
     returned in its place. pks collects 'prior'/'likelihood'
     diagnostics as a side channel.
     """
     # Bug fix: the original used a mutable default (pks={}), which is
     # shared across calls.
     if pks is None:
         pks = {}
     prior_value = prior(x, p=p, use_skewed_distr=use_skewed_distr, pks=pks)
     if prior_value == -float('inf'):
         return -float('inf'), prior_value
     likelihood_value = likelihood(x, emp_cov, M=M)
     pks['prior'] = prior_value
     pks['likelihood'] = likelihood_value
     return likelihood_value, prior_value
示例#7
0
 def posterior(x, pks=None):
     """Return (likelihood, prior, per-component priors, covariance).

     The extra diagnostics are pulled out of pks, which prior() and
     likelihood() populate as a side channel.
     NOTE(review): the -inf branch returns a 2-tuple while the normal
     path returns a 4-tuple; preserved as-is since callers may rely
     on it.
     """
     # Bug fix: the original used a mutable default (pks={}), which is
     # shared across calls.
     if pks is None:
         pks = {}
     prior_value = prior(x, p=p, use_skewed_distr=use_skewed_distr, pks=pks)
     if prior_value == -float('inf'):
         return -float('inf'), prior_value
     likelihood_value = likelihood(x, emp_cov, M=M, pks=pks)
     pks['prior'] = prior_value
     pks['likelihood'] = likelihood_value
     prior_values = (pks['branch_prior'], pks['no_admix_prior'],
                     pks['admix_prop_prior'], pks['top_prior'])
     covariance = pks['covariance']
     return likelihood_value, prior_value, prior_values, covariance
示例#8
0
 def posterior(x, pks=None):
     """Return (likelihood_value, prior_value) for state x.

     The likelihood is skipped (reported as -inf) when the prior is
     -inf. pks collects 'prior'/'likelihood' diagnostics.
     """
     # Bug fix: the original used a mutable default (pks={}), which is
     # shared across calls.
     if pks is None:
         pks = {}
     prior_value = prior(x,
                         p=p,
                         use_skewed_distr=use_skewed_distr,
                         pks=pks,
                         use_uniform_prior=use_uniform_prior)
     if prior_value == -float('inf'):
         return -float('inf'), prior_value
     likelihood_value = likelihood(x, emp_cov, M=M, nodes=nodes)
     pks['prior'] = prior_value
     pks['likelihood'] = likelihood_value
     return likelihood_value, prior_value
def generateGraphSpacewithUniformPrior():
    """Flask view: build the graph space for the request's argument sets,
    render one image per graph, and return a uniform prior over the
    space as JSON."""
    pos_args = json.loads(request.args.get('pos_args'))
    neg_args = json.loads(request.args.get('neg_args'))
    rating = int(request.args.get('rating'))  # read but unused here

    # Build the graph space and generate the graph images.
    p_G = prior(pos_args, neg_args)
    graph_image_generator.create_graphs(pos_args, neg_args, p_G.arg_matrices)

    # Uniform prior: equal mass on every graph in the space.
    n_graphs = len(p_G.arg_matrices)
    prior_distribution = [1 / n_graphs] * n_graphs

    return jsonify(prior_distribution)
Example #10
0
def main():
    """End-to-end naive-Bayes run: regenerate the CSVs, fit prior and
    class-conditional estimates on the train split, then print the
    classification error on both train and test splits."""
    genCSV.genCSV()
    print('Reading Required Files')
    # Labels are shifted down by one to be 0-based.
    XTrain = pd.read_csv('../Data/XTrain.csv')
    yTrain = pd.read_csv('../Data/yTrain.csv') - 1
    XTest = pd.read_csv('../Data/XTest.csv')
    yTest = pd.read_csv('../Data/yTest.csv') - 1
    print('Calculating Prior')
    p = prior.prior(yTrain)
    # Bug fix: corrected the misspelled status message ("Probailites").
    print('Calculating Estimated Probabilities')
    estProb = xGivenY.XGivenY(XTrain, yTrain)
    print('Classifying')
    yhatTrain = classify.classify(estProb, p, XTrain)
    print(
        ClassificationError.classificationError(yhatTrain.values,
                                                yTrain.values))
    print('Classifying the Test')
    yhatTest = classify.classify(estProb, p, XTest)
    print(
        ClassificationError.classificationError(yhatTest.values, yTest.values))
def generatePosteriorDistributionWithObsevation():
    """Flask view: update the current prior with a single observation and
    return the likelihood and posterior distributions as JSON.

    (The public endpoint name, including its historical misspelling, is
    kept unchanged.)
    """
    args = request.args

    # Decode the observation payload from the query string.
    observation = {
        'pos_args': json.loads(args.get('pos_args')),
        'neg_args': json.loads(args.get('neg_args')),
        'rating': int(args.get('rating')),
        'observationNo': int(args.get('observationNo')),
        'attacks': [tuple(attack) for attack in json.loads(args.get('attacks'))],
    }

    currentPrior = np.array(json.loads(args.get('currentPrior')))

    # Will need to rebuild all of the important graph space data as this is
    # needed for the liklihood construction.
    graphSpaceSummary = json.loads(args.get('graphSpaceSummary'))
    p_G = prior(graphSpaceSummary['pos_args'], graphSpaceSummary['neg_args'])
    p_G.rating = graphSpaceSummary['rating']  # This is possibly reckless coding

    p_G_T = liklihood(p_G, observation)
    liklihood_distribution = p_G_T.buildLiklihoodDistribution()

    p_T_G = posterior(currentPrior, liklihood_distribution)
    posterior_distribution = p_T_G.buildPosteriorDistribution()

    distributions = {
        'liklihood_distribution': list(liklihood_distribution),
        'posterior_distribution': list(posterior_distribution),
    }
    return jsonify(distributions)
示例#12
0
    def __init__(self, params_file):
        """Wire up the whole pipeline from a parameter file — info, prior,
        likelihoods, sample set, sampler — then iterate the sampler until
        its own convergence check passes."""
        self.info = info(params_file)
        self.prior = prior(self.info["params"])
        self.likelihood = likelihood_dict(self.info["likelihoods"])
        # Prior and likelihood must live in the same parameter space.
        assert self.likelihood.dimension() == self.prior.dimension(), (
            "The dimensionalities of prior and likelihood do not match.")
        plot_likelihood(self.likelihood, self.prior)
        self.samples = samples_set(self.info["params"])
        self.sampler = get_sampler(self.info["sampler"])(self.info["sampler"], self.prior, self.likelihood)
        # Emit diagnostics every n_diagnosis iterations.
        n_diagnosis = 10
        # Fire sampler.
        while not self.sampler.check_convergence():
            self.sampler.next_iteration()
            if not self.sampler.i_iter % n_diagnosis:
                self.sampler.diagnosis(prepare_axes)
示例#13
0
        # NOTE(review): fragment of a liklihood-builder method — the `def`
        # line is outside this excerpt, so the contract is inferred from
        # usage elsewhere in the file; confirm against the full class.
        # Dispatch on the observation type: 'matching' uses the exact-match
        # likelihood builder, anything else the similarity-based one.
        if self.observation_type == 'matching':
            self.liklihood_distribution = buildMatchingLiklihood(
                self.current_prior, self.observation_data,
                self.uniform_distribution_value)
        else:
            self.liklihood_distribution = buildSimilarLiklihood(
                self.current_prior, self.observation_data,
                self.uniform_distribution_value)

        return self.liklihood_distribution


# Demo: build a small graph space over two pro- and one con-argument,
# observe a single rated attack configuration, and print the resulting
# liklihood distribution.
pos_args = ['a', 'b']
neg_args = ['c']

p_G = prior(pos_args, neg_args)
prior_distribution = p_G.getDistribution(10)

observation = {
    'pos_args': ['a'],
    'neg_args': ['c'],
    'rating': 9,
    'attacks': [('a', 'c'), ('c', 'a')],
}

l = liklihood(p_G, observation)
liklihood_distribution = l.buildLiklihoodDistribution()

print(liklihood_distribution)

print('hello')
    # NOTE(review): tail of a normal-pdf helper — the `def` line (which
    # introduces x, mean, var) is outside this excerpt; kept verbatim.
    # Gaussian density: exp(-(x - mean)^2 / (2 var)) / sqrt(2 pi var).
    exp_squared_diff = (x - mean)**2
    exp_power = -exp_squared_diff / (2 * var)
    exponent = math.exp(exp_power)
    denominator = ((2 * math.pi)**(1 / 2)) * (var**(1 / 2))
    normal_prob = exponent / denominator
    return normal_prob


"""####################input_value_and_answer########################"""
data = sio.loadmat('ecoli')
xTest = data['xTest']
xTrain = data['xTrain']
yTest = data['yTest']
yTrain = data['yTrain']
#answer for 1
p = prior(yTrain)
#answer for 2
M, V = likelihood(xTrain, yTrain)
#answer for 3
nb = naiveBayesClassify(xTest, M, V, p)
#answer for 4
y_t = yTest.T
y_test = y_t[0]
"""line1"""
er = y_test - nb
er = er.tolist()
number = er.count(0)
precision_total = number / len(nb)
"""line2"""
p1_index = []
for i in range(len(nb)):
示例#15
0
    def __init__(self):
        """Build an SSD-style detector: a VGG16-like backbone, extra
        feature layers, per-source multibox loc/conf heads, and the
        precomputed prior (default) boxes for each feature map.

        Head channel counts follow boxes-per-cell x 4 coords (loc) and
        boxes-per-cell x classes (conf): 12 = 3*4, 24 = 6*4, and
        63 = 3*21 / 126 = 6*21 — presumably 21 classes (VOC-style);
        confirm against the training config.
        """
        super(SSD, self).__init__(
            conv1_1=L.Convolution2D(3, 64, 3, pad=1),
            conv1_2=L.Convolution2D(64, 64, 3, pad=1),
            conv2_1=L.Convolution2D(64, 128, 3, pad=1),
            conv2_2=L.Convolution2D(128, 128, 3, pad=1),
            conv3_1=L.Convolution2D(128, 256, 3, pad=1),
            conv3_2=L.Convolution2D(256, 256, 3, pad=1),
            conv3_3=L.Convolution2D(256, 256, 3, pad=1),
            conv4_1=L.Convolution2D(256, 512, 3, pad=1),
            conv4_2=L.Convolution2D(512, 512, 3, pad=1),
            conv4_3=L.Convolution2D(512, 512, 3, pad=1),
            conv5_1=L.Convolution2D(512, 512, 3, pad=1),
            conv5_2=L.Convolution2D(512, 512, 3, pad=1),
            conv5_3=L.Convolution2D(512, 512, 3, pad=1),
            # fc6/fc7 are the VGG fully-connected layers converted to
            # (dilated) convolutions, as in the SSD paper.
            fc6=L.DilatedConvolution2D(512, 1024, 3, pad=6, dilate=6),
            fc7=L.Convolution2D(1024, 1024, 1),
            conv6_1=L.Convolution2D(1024, 256, 1),
            conv6_2=L.Convolution2D(256, 512, 3, stride=2, pad=1),
            conv7_1=L.Convolution2D(512, 128, 1),
            conv7_2=L.Convolution2D(128, 256, 3, stride=2, pad=1),
            conv8_1=L.Convolution2D(256, 128, 1),
            conv8_2=L.Convolution2D(128, 256, 3, stride=2, pad=1),
            normalize=L.Scale(W_shape=512),
            conv4_3_norm_mbox_loc=L.Convolution2D(512, 12, 3,
                                                  pad=1),  # 3 prior boxes
            conv4_3_norm_mbox_conf=L.Convolution2D(512, 63, 3, pad=1),
            fc7_mbox_loc=L.Convolution2D(1024, 24, 3, pad=1),  # 6 prior boxes
            fc7_mbox_conf=L.Convolution2D(1024, 126, 3, pad=1),
            conv6_2_mbox_loc=L.Convolution2D(512, 24, 3,
                                             pad=1),  # 6 prior boxes
            conv6_2_mbox_conf=L.Convolution2D(512, 126, 3, pad=1),
            conv7_2_mbox_loc=L.Convolution2D(256, 24, 3,
                                             pad=1),  # 6 prior boxes
            conv7_2_mbox_conf=L.Convolution2D(256, 126, 3, pad=1),
            conv8_2_mbox_loc=L.Convolution2D(256, 24, 3,
                                             pad=1),  # 6 prior boxes
            conv8_2_mbox_conf=L.Convolution2D(256, 126, 3, pad=1),
            pool6_mbox_loc=L.Convolution2D(256, 24, 3, pad=1),
            pool6_mbox_conf=L.Convolution2D(256, 126, 3,
                                            pad=1),  # 6 prior boxes
        )
        self.train = False
        # (source tag, feature-map height, width, prior boxes per cell).
        self.set_info("c4", 38, 38, 3)
        self.set_info("f7", 19, 19, 6)
        self.set_info("c6", 10, 10, 6)
        self.set_info("c7", 5, 5, 6)
        self.set_info("c8", 3, 3, 6)
        self.set_info("p6", 1, 1, 6)

        # Prior boxes per source layer. Argument meaning assumed to be
        # (map size, min size, max size, aspect ratios, flip, clip,
        # variances) — confirm against prior.prior's signature.
        self.conv4_3_norm_mbox_priorbox = prior.prior((38, 38), 30., 0, [2], 1,
                                                      1, (0.1, 0.1, 0.2, 0.2))
        self.fc7_mbox_priorbox = prior.prior((19, 19), 60., 114., [2, 3], 1, 1,
                                             (0.1, 0.1, 0.2, 0.2))
        self.conv6_2_mbox_priorbox = prior.prior((10, 10), 114., 168., [2, 3],
                                                 1, 1, (0.1, 0.1, 0.2, 0.2))
        self.conv7_2_mbox_priorbox = prior.prior((5, 5), 168., 222., [2, 3], 1,
                                                 1, (0.1, 0.1, 0.2, 0.2))
        self.conv8_2_mbox_priorbox = prior.prior((3, 3), 222., 276., [2, 3], 1,
                                                 1, (0.1, 0.1, 0.2, 0.2))
        self.pool6_mbox_priorbox = prior.prior((1, 1), 276., 330., [2, 3], 1,
                                               1, (0.1, 0.1, 0.2, 0.2))
        # All priors concatenated into one (N, 2, 4) array: per box, the
        # coordinates row plus the variances row (inferred from the
        # reshape; confirm against prior.prior's output layout).
        self.mbox_prior = np.concatenate([
            self.conv4_3_norm_mbox_priorbox.reshape(-1, 2, 4),
            self.fc7_mbox_priorbox.reshape(-1, 2, 4),
            self.conv6_2_mbox_priorbox.reshape(-1, 2, 4),
            self.conv7_2_mbox_priorbox.reshape(-1, 2, 4),
            self.conv8_2_mbox_priorbox.reshape(-1, 2, 4),
            self.pool6_mbox_priorbox.reshape(-1, 2, 4),
        ],
                                         axis=0)
示例#16
0
    def __call__(self, params, dtype=np.double):
        """Return prior(q, C, p) + likelihood(q, C, p) for the unpacked
        parameter triple ``params``.

        ``dtype`` is accepted for interface compatibility but not used
        in this body.
        """
        q, C, p = params
        # Fix: removed an unused local import (`from math import log`).
        return prior.prior(q, C, p) + likelihood.likelihood(q, C, p)
示例#17
0
def buildSimilarOverlapLiklihood(current_prior, observation_data,
                                 uniform_distribution_value):
    """Build a likelihood distribution over the current prior's graph
    space for an observation whose argument set only partially overlaps
    the space.

    Combines (a) closeness of aggregate scores on the overlapping
    sub-graphs and (b) grounded-extension differences, then shrinks the
    result towards the uniform distribution according to how much the
    argument sets and ratings agree.

    NOTE(review): the meaning of graph_data_complete[:, 3] (used as the
    aggregate score) is inferred from usage — confirm against the prior
    class.
    """
    graph_space_args = current_prior.pos_args + current_prior.neg_args
    observation_args = observation_data['pos_args'] + observation_data[
        'neg_args']

    # Arguments present in both the graph space and the observation.
    pos_args_overlap = list(
        set(current_prior.pos_args).intersection(
            set(observation_data['pos_args'])))
    neg_args_overlap = list(
        set(current_prior.neg_args).intersection(
            set(observation_data['neg_args'])))

    # A fresh graph space restricted to the overlapping arguments.
    p_G_overlap = prior(pos_args_overlap, neg_args_overlap)
    prior_distribution_overlap = p_G_overlap.getDistribution(10)

    observation_args_overlap_indices = [
        observation_args.index(arg)
        for arg in pos_args_overlap + neg_args_overlap
    ]

    # Sub-matrix of the observed graph restricted to overlapping args.
    observation_overlap_graph = (observation_data['argMtx'])[
        observation_args_overlap_indices, :][:,
                                             observation_args_overlap_indices]

    graph_space_args_overlap_indices = [
        graph_space_args.index(arg)
        for arg in pos_args_overlap + neg_args_overlap
    ]

    # Same restriction applied to every graph in the prior's space.
    prior_overlap_graphs = []
    for arg_mtx in current_prior.arg_matrices:
        graph_space_overlap_graph = arg_mtx[
            graph_space_args_overlap_indices, :][:,
                                                 graph_space_args_overlap_indices]
        prior_overlap_graphs.append(graph_space_overlap_graph)

    # Aggregate score of the observation's overlap graph (column 3 of
    # the overlap space's graph_data_complete table).
    observation_agg = 0
    for idx, arg_mtx in enumerate(p_G_overlap.arg_matrices):
        if np.array_equal(observation_overlap_graph, arg_mtx):
            observation_agg = p_G_overlap.graph_data_complete[idx, 3]
            break

    # Aggregate score of each prior graph's overlap restriction, looked
    # up by matrix equality in the overlap space.
    prior_overlap_aggs = []
    for prior_overlap_graph in prior_overlap_graphs:
        for idx, arg_mtx in enumerate(p_G_overlap.arg_matrices):
            if np.array_equal(prior_overlap_graph, arg_mtx):
                prior_overlap_aggs.append(p_G_overlap.graph_data_complete[idx,
                                                                          3])
                break

    # Grounded-extension difference between the observation and each
    # graph in the space.
    diffs_grounded = []
    for graph in current_prior.arg_matrices:
        diffs_grounded.append(
            groundedDiff(observation_data, graph, observation_args,
                         graph_space_args))

    # Similarity in aggregate score, mapped to (0, 1] and normalised.
    agg_distances = 1 / (
        1 + np.abs(np.array(prior_overlap_aggs) - observation_agg))

    normalising_constant = np.sum(agg_distances)

    normalised_agg_distances = agg_distances / normalising_constant

    diffs_grounded = np.array(diffs_grounded)

    # Combine both signals and renormalise to a distribution.
    dis_total = normalised_agg_distances + diffs_grounded
    normalising_constant = np.sum(dis_total)

    dis_total_distribution = dis_total / normalising_constant

    # delta_args in [0, 1]: fraction of shared arguments; delta_rating
    # in [0, 1]: agreement between prior and observation ratings.
    delta_args = len(list(
        set(observation_args).intersection(graph_space_args))) / np.max(
            [len(observation_args),
             len(graph_space_args)])
    delta_rating = (
        10 - (np.abs(current_prior.rating - observation_data['rating']))) / 10

    # Interpolate between the uniform distribution (no trust in the
    # observation) and dis_total_distribution (full trust).
    liklihood_distribution = uniform_distribution_value - (
        delta_args * delta_rating *
        (uniform_distribution_value - dis_total_distribution))

    return liklihood_distribution