Example #1
    def verify_numpy(self, evidence, marginals1):

        size = batch_size = d.evd_size(evidence)

        u.show(
            f'\nVerifying against classical AC (numpy arrays, batch_size {batch_size})'
        )

        # split lambdas into scalars (with batch)
        evidence = self.split_evidence(evidence)

        eval_time = 0  # pure evaluation time (add/mul/div)
        for start in range(0, size, batch_size):
            u.show(f'{int(100*start/size):4d}%\r', end='', flush=True)
            stop = start + batch_size
            evidence_batch = d.evd_slice(evidence, start, stop)
            marginals_batch, et = self.evaluate_numpy(evidence_batch)
            marginals_batch = self.gather_marginals(marginals_batch)
            if start == 0:
                marginals2 = marginals_batch
            else:
                marginals2 = np.concatenate((marginals2, marginals_batch),
                                            axis=0)
            eval_time += et

        u.equal(marginals1, marginals2, tolerance=True)

        size = d.evd_size(evidence)
        u.show(
            f'Evaluation Time: {eval_time:.3f} sec ({1000*eval_time/size:.0f} ms per example)'
        )
        return eval_time, batch_size
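
The loop above slices the evidence into batches, evaluates each slice, and stitches the per-batch marginals back together with np.concatenate. A minimal, self-contained sketch of that accumulate-by-concatenation pattern (plain NumPy; evaluate_batch is a hypothetical stand-in for self.evaluate_numpy):

import numpy as np

def evaluate_batch(batch):
    # hypothetical stand-in for self.evaluate_numpy: normalize each row
    return batch / batch.sum(axis=1, keepdims=True)

evidence = np.random.rand(10, 4).astype(np.float32)
size, batch_size = len(evidence), 3

marginals = None
for start in range(0, size, batch_size):
    batch = evaluate_batch(evidence[start:start + batch_size])
    marginals = batch if marginals is None else np.concatenate((marginals, batch), axis=0)

assert marginals.shape == evidence.shape
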
Example #2
def validate(size,output,testing,elm_method='minfill',elm_wait=30):
    
    circuit_type = 'TAC' if testing else 'AC'
    
    # get data (ground truth)
    evidence, labels = rdata.get(size,output)
    
    u.show(f'\n===Checking {circuit_type} for rectangle {output} in {size}x{size} images: {len(labels)} total')
    
    # get model
    bn, inputs = rmodel.get(size,output,testing=testing,use_bk=True,tie_parameters=False)
    
    # compile model
    AC = tac.TAC(bn,inputs,output,trainable=False,profile=False,
            elm_method=elm_method,elm_wait=elm_wait)

    # evaluate TAC on evidence to get predictions
    predictions = AC.evaluate(evidence)

    # verify that predictions match the ground-truth labels
    if u.equal(predictions,labels): 
        u.show('\n===All good!')
    else:
        u.show('***bumper!!!')
        quit()
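
u.equal above compares the circuit's predictions against the ground-truth labels. A self-contained sketch of a comparable check in plain NumPy (argmax agreement with one-hot labels; the data and the check are illustrative assumptions, not the project's u.equal):

import numpy as np

predictions = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])   # circuit outputs (synthetic)
labels = np.array([[1, 0], [0, 1], [1, 0]], dtype=np.float32)  # one-hot ground truth (synthetic)

all_match = np.array_equal(predictions.argmax(axis=1), labels.argmax(axis=1))
print('===All good!' if all_match else '***bumper!!!')
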
Example #3
    def verify_array(self, evidence, marginals1):
        u.show('\nVerifying against classical AC (array)...')

        size = d.evd_size(evidence)
        rows = d.evd_col2row(evidence)
        marginals2 = []

        # evaluation time excludes assertion of evidence
        eval_time = 0  # pure evaluation time (add/mul/div)
        for lambdas in rows:
            self.assert_evidence_array(lambdas)
            marginal, et = self.evaluate_array()  # np array
            marginals2.append(marginal)
            eval_time += et

        marginals2 = np.array(marginals2, dtype=np.float32)
        u.equal(marginals1, marginals2, tolerance=True)

        u.show(
            f'Evaluation Time: {eval_time:.3f} sec ({1000*eval_time/size:.0f} ms per example)'
        )
        return eval_time, 1
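
verify_array accumulates pure evaluation time one example at a time. A small, self-contained sketch of that timing pattern, with a placeholder evaluate() that returns its result together with its elapsed time, as evaluate_array() appears to:

import time
import numpy as np

def evaluate(row):
    # placeholder: returns (result, elapsed seconds), like evaluate_array() above
    t0 = time.perf_counter()
    result = np.asarray(row, dtype=np.float32) * 2.0
    return result, time.perf_counter() - t0

rows = [np.random.rand(4) for _ in range(5)]
eval_time, results = 0.0, []
for row in rows:
    result, et = evaluate(row)
    results.append(result)
    eval_time += et

results = np.array(results, dtype=np.float32)
print(f'Evaluation Time: {eval_time:.3f} sec '
      f'({1000 * eval_time / len(rows):.0f} ms per example)')
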
Example #4
    def verify_tf_graph(self, evidence, marginals1):
        assert self.tf_ac is not None

        size = batch_size = d.evd_size(evidence)

        u.show(
            f'\nVerifying against classical AC (tf graph, batch_size {batch_size})'
        )

        # split lambdas into scalars (with batch)
        evidence = self.split_evidence(evidence)

        # tf graph accepts only tensors as input
        evidence = tuple(tf.constant(e, dtype=tf.float32) for e in evidence)

        start_eval_time = time.perf_counter()
        for start in range(0, size, batch_size):
            u.show(f'{int(100*start/size):4d}%\r', end='', flush=True)
            stop = start + batch_size
            evidence_batch = d.evd_slice(evidence, start, stop)
            marginals_batch = self.tf_ac(*evidence_batch)  # evaluate the tf graph
            marginals_batch = self.gather_marginals(marginals_batch)
            if start == 0:
                marginals2 = marginals_batch
            else:
                marginals2 = np.concatenate((marginals2, marginals_batch),
                                            axis=0)
        eval_time = time.perf_counter() - start_eval_time

        u.equal(marginals1, marginals2, tolerance=True)

        size = d.evd_size(evidence)
        u.show(
            f'Evaluation Time: {eval_time:.3f} sec ({1000*eval_time/size:.0f} ms per example)'
        )
        return eval_time, batch_size
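
The tf graph above is fed tf.constant tensors and the batched loop is timed with time.perf_counter. A minimal, self-contained sketch of that call-and-time pattern, using a tf.function as a stand-in for self.tf_ac (an assumption for illustration only):

import time
import numpy as np
import tensorflow as tf

@tf.function
def tf_ac(*lambdas):
    # hypothetical circuit: just sum the evidence scalars per example
    return tf.add_n(list(lambdas))

evidence = tuple(tf.constant(np.random.rand(8, 1), dtype=tf.float32) for _ in range(3))

start_eval_time = time.perf_counter()
marginals = tf_ac(*evidence).numpy()
eval_time = time.perf_counter() - start_eval_time
print(f'{eval_time:.3f} sec, output shape {marginals.shape}')
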
Example #5
def validate(size, digits, testing, elm_method='minfill', elm_wait=30):
    assert size >= 7
    assert all(d in range(10) for d in digits)

    # get data (ground truth)
    evidence, labels = ddata.get(size, digits)
    data_size = len(labels)

    circuit_type = 'TAC' if testing else 'AC'
    u.show(
        f'\n===Checking {circuit_type} for digits {digits} in {size}x{size} images: {data_size} total'
    )

    # get model
    net, inputs, output = dmodel.get(size,
                                     digits,
                                     testing,
                                     use_bk=True,
                                     tie_parameters=False,
                                     remove_common=False)

    # compile model into circuit
    circuit = tac.TAC(net,
                      inputs,
                      output,
                      trainable=False,
                      profile=False,
                      elm_method=elm_method,
                      elm_wait=elm_wait)

    # evaluate circuit on evidence to get predictions
    predictions = circuit.evaluate(evidence)

    # verify that predictions match labels
    if u.equal(predictions, labels):
        u.show('\n===All good!\n')
    else:
        u.show('***bumper!!!')
        quit()
Example #6
def validateThirdOrderHMM(size, card, num_examples=10):
    # validate the AC of the HMM model by comparing the query pr(X_t, Y[1:t]) against the forward algorithm
    # generate random transition and emission probabilities
    transition = np.random.rand(card, card, card, card)
    transition_sum = np.sum(transition, axis=-1, keepdims=True)
    transition = transition / transition_sum
    emission = np.random.rand(card, card)
    emission_sum = np.sum(emission, axis=1, keepdims=True)
    emission = emission / emission_sum

    # define an hmm model with these parameters
    bn = hmm.getNthOrderHMM(size,
                            card,
                            3,
                            param=True,
                            transition=transition,
                            emission=emission)

    logging("Start testing third order HMM of length {}".format(size))

    # compile an ac that computes pr(X_{T+1}, Y[1:T])
    inputs = ['e_' + str(i) for i in range(size - 1)]
    output = 'h_' + str(size - 1)
    ac = tac.TAC(bn, inputs, output, trainable=False)
    evidence_ac, evidence_dp = generate_hard_evidence(size - 1, 1)
    labels_ac = ac.evaluate(evidence_ac)
    logging("ac labels: %s" % (labels_ac))
    labels_dp = []
    for evid in evidence_dp:
        label = hmm.predictThirdOrder(size, evid, transition, emission)
        labels_dp.append(label)
    labels_dp = np.stack(labels_dp)
    logging("forward labels: %s" % (labels_dp))
    if u.equal(labels_ac, labels_dp, tolerance=True):
        logging("Successfully validated third order HMM of length {}\n".format(
            size))
    else:
        logging(
            "Inconsistent queries for third order HMM of length {}\n".format(
                size))
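
hmm.predictThirdOrder serves as the forward-algorithm baseline above. For intuition only, a self-contained first-order forward pass that computes the analogous next-state query pr(X_{T+1} | y_1..y_T); this is an illustration, not the project's third-order routine:

import numpy as np

def forward_next_state(obs, prior, transition, emission):
    # pr(X_{T+1} | y_1..y_T) for a *first-order* HMM via the forward algorithm
    alpha = prior
    for y in obs:
        alpha = alpha * emission[:, y]      # weight by pr(y | x)
        alpha = alpha / alpha.sum()         # renormalize for numerical stability
        alpha = alpha @ transition          # propagate one step forward
    return alpha / alpha.sum()

card, length = 3, 5
rng = np.random.default_rng(0)
transition = rng.random((card, card))
transition /= transition.sum(axis=1, keepdims=True)
emission = rng.random((card, card))
emission /= emission.sum(axis=1, keepdims=True)
prior = np.full(card, 1.0 / card)
obs = rng.integers(0, card, size=length)
print(forward_next_state(obs, prior, transition, emission))
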
Example #7
        output = net(inputs)  # (bs, 40)
        loss = loss_func(output, label)

        for i in range(4):
            pre = F.log_softmax(output[:, 10 * i:10 * i + 10],
                                dim=1)  # (bs, 10)
            # take the max along each row and return the column (class) indices
            pred = torch.cat((pred, pre.data.max(1, keepdim=True)[1].cpu()),
                             dim=1)

        loss.backward()
        optimizer.step()

        running_loss += loss.data * inputs.shape[0]
        running_corrects += equal(pred.numpy()[:, 1:],
                                  label.data.cpu().numpy().astype(int))

    epoch_loss = running_loss / dataset_size
    epoch_acc = running_corrects / dataset_size

    if epoch_acc > best_acc:
        best_acc = epoch_acc
        best_model_wts = copy.deepcopy(net.state_dict())

    if epoch == DefaultConfig.EPOCH - 1:
        torch.save(
            best_model_wts,
            DefaultConfig.file_path + '../checkpoint/best_model_wts.pkl')

    print()
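
The snippet above starts mid-batch, so the surrounding scaffolding (gradient reset, the per-batch pred tensor, the epoch/batch loops) is not shown. A generic, self-contained sketch of the standard PyTorch structure such a step usually sits in; every name below is an assumption, not the original model or data:

import torch
import torch.nn as nn
import torch.nn.functional as F

net = nn.Linear(8, 40)                       # placeholder model producing (bs, 40) logits
loss_func = nn.CrossEntropyLoss()            # placeholder loss for each 10-way digit slice
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)

inputs = torch.randn(16, 8)                  # placeholder batch
label = torch.randint(0, 10, (16, 4))        # four digit labels per example

optimizer.zero_grad()                        # reset gradients before each batch
output = net(inputs)                         # (bs, 40)
loss = sum(loss_func(output[:, 10 * i:10 * i + 10], label[:, i]) for i in range(4))

pred = torch.empty(16, 0, dtype=torch.long)  # start empty, concatenate one digit at a time
for i in range(4):
    pre = F.log_softmax(output[:, 10 * i:10 * i + 10], dim=1)  # (bs, 10)
    pred = torch.cat((pred, pre.data.max(1, keepdim=True)[1].cpu()), dim=1)

loss.backward()
optimizer.step()
print(pred.shape)                            # torch.Size([16, 4])
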