Example #1
 def get(self):
     data = super(System, self).get()
     data['cpuInfoDto'] = random_data(
         data['cpuInfoDto'], 'capacity',
         'usedCapacity', 'freeCapacity', 600, 1600)
     data['memInfoDto'] = random_data(
         data['memInfoDto'], 'totalMemory',
         'usedMemory', 'freeMemory', 3500, 4700)
     return data
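
The random_data helper used in Example #1 is not shown on this page. A minimal sketch of what such a helper could look like, given how it is called above (the name, signature, and value ranges are assumptions inferred from the call site, not the project's actual code):

import random

def random_data(info, total_key, used_key, free_key, low, high):
    # Hypothetical helper: fill a metrics dict with a random total in
    # [low, high] and a consistent used/free split, so the mocked
    # endpoint returns different numbers on every call.
    total = random.randint(low, high)
    used = random.randint(0, total)
    info[total_key] = total
    info[used_key] = used
    info[free_key] = total - used
    return info
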
def test_est_propensity():

	D = np.array([0, 0, 0, 1, 1, 1])
	X = np.array([[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]])
	Y = random_data(D_cur=D, X_cur=X)
	causal = c.CausalModel(Y, D, X)

	causal.est_propensity()
	lin = [0, 1]
	qua = []
	coef = np.array([6.8066090, -0.0244874, -0.7524939])
	loglike = -3.626517
	fitted = np.array([0.6491366, 0.3117840, 0.2911631,
	                   0.8086407, 0.3013733, 0.6379023])
	se = np.array([8.5373779, 0.4595191, 0.8106499])
	keys = {'lin', 'qua', 'coef', 'loglike', 'fitted', 'se'}
	
	assert_equal(causal.propensity['lin'], lin)
	assert_equal(causal.propensity['qua'], qua)
	assert np.allclose(causal.propensity['coef'], coef)
	assert np.allclose(causal.propensity['loglike'], loglike)
	assert np.allclose(causal.propensity['fitted'], fitted)
	assert np.allclose(causal.propensity['se'], se)
	assert_equal(set(causal.propensity.keys()), keys)
	assert np.allclose(causal.raw_data['pscore'], fitted)
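
The tests above import random_data from the package's test utilities, which are not reproduced on this page. A minimal stand-in compatible with the call random_data(D_cur=D, X_cur=X) might look like the sketch below; the linear outcome model is an assumption, and the fixed numbers asserted in the tests depend on the library's own helper and random seed, so this stand-in would not reproduce them.

import numpy as np

def random_data(D_cur, X_cur, delta=3):
    # Hypothetical stand-in: draw an outcome vector Y from the supplied
    # treatment indicator D and covariate matrix X so that
    # CausalModel(Y, D, X) can be constructed in the tests.
    N, K = X_cur.shape
    beta = np.random.rand(K)
    return X_cur.dot(beta) + delta * D_cur + np.random.normal(size=N)
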
def test_select_qua_terms():

    Y, D, X = random_data()
    X_c_random, X_t_random = X[D == 0], X[D == 1]

    lin1 = [0, 1]
    C1 = np.inf
    ans1 = []
    assert_equal(p.select_qua_terms(X_c_random, X_t_random, lin1, C1), ans1)

    lin2 = [1, 0]
    C2 = 0
    ans2 = [(1, 1), (1, 0), (0, 0)]
    assert_equal(p.select_qua_terms(X_c_random, X_t_random, lin2, C2), ans2)

    lin3 = [0]
    C3 = -983.340
    ans3 = [(0, 0)]
    assert_equal(p.select_qua_terms(X_c_random, X_t_random, lin3, C3), ans3)

    lin4 = []
    C4 = 34.234
    ans4 = []
    assert_equal(p.select_qua_terms(X_c_random, X_t_random, lin4, C4), ans4)

    X_c = np.array([[7, 8], [3, 10], [7, 10]])
    X_t = np.array([[4, 7], [5, 10], [9, 8]])

    lin5 = [0, 1]
    C5 = 1.1
    ans5 = [(1, 1), (0, 1), (0, 0)]
    assert_equal(p.select_qua_terms(X_c, X_t, lin5, C5), ans5)
def test_select_lin():

    Y, D, X = random_data(K=4)
    X_c_random, X_t_random = X[D == 0], X[D == 1]

    lin1 = [0, 1, 2, 3]
    C1 = np.random.rand(1)
    ans1 = [0, 1, 2, 3]
    assert_equal(p.select_lin(X_c_random, X_t_random, lin1, C1), ans1)

    X_c = np.array([[1, 2], [9, 7]])
    X_t = np.array([[1, 4], [9, 6]])

    lin2 = []
    C2 = 0.07
    ans2 = []
    assert_equal(p.select_lin(X_c, X_t, lin2, C2), ans2)

    lin3 = []
    C3 = 0.06
    ans3 = [1, 0]
    assert_equal(p.select_lin(X_c, X_t, lin3, C3), ans3)

    lin4 = [1]
    C4 = 0.35
    ans4 = [1]
    assert_equal(p.select_lin(X_c, X_t, lin4, C4), ans4)

    lin5 = [1]
    C5 = 0.34
    ans5 = [1, 0]
    assert_equal(p.select_lin(X_c, X_t, lin5, C5), ans5)
def test_select_lin_terms():

    Y, D, X = random_data(K=4)
    X_c_random, X_t_random = X[D == 0], X[D == 1]

    lin1 = [3, 0, 1]
    C1 = np.inf
    ans1 = [3, 0, 1]
    assert_equal(p.select_lin_terms(X_c_random, X_t_random, lin1, C1), ans1)

    lin2 = [2]
    C2 = 0
    ans2 = [2, 0, 1, 3]
    assert_equal(p.select_lin_terms(X_c_random, X_t_random, lin2, C2), ans2)

    lin3 = []
    C3 = 0
    ans3 = [0, 1, 2, 3]
    assert_equal(p.select_lin_terms(X_c_random, X_t_random, lin3, C3), ans3)

    lin4 = [3, 1]
    C4 = -34.234
    ans4 = [3, 1, 0, 2]
    assert_equal(p.select_lin_terms(X_c_random, X_t_random, lin4, C4), ans4)

    X_c = np.array([[1, 2], [9, 7]])
    X_t = np.array([[1, 4], [9, 7]])

    lin5 = []
    C5 = 0.06
    ans5 = [1, 0]
    assert_equal(p.select_lin_terms(X_c, X_t, lin5, C5), ans5)
Example #6
def test_est_propensity():

    D = np.array([0, 0, 0, 1, 1, 1])
    X = np.array([[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]])
    Y = random_data(D_cur=D, X_cur=X)
    causal = c.CausalModel(Y, D, X)

    causal.est_propensity(feature_names=[], exclude=[])
    lin = [0, 1]
    qua = []
    coef = np.array([6.8066090, -0.0244874, -0.7524939])
    loglike = -3.626517
    fitted = np.array(
        [0.6491366, 0.3117840, 0.2911631, 0.8086407, 0.3013733, 0.6379023])
    se = np.array([8.5373779, 0.4595191, 0.8106499])
    keys = {'lin', 'qua', 'coef', 'loglike', 'fitted', 'se'}

    assert_equal(causal.propensity['lin'], lin)
    assert_equal(causal.propensity['qua'], qua)
    assert np.allclose(causal.propensity['coef'], coef)
    assert np.allclose(causal.propensity['loglike'], loglike)
    assert np.allclose(causal.propensity['fitted'], fitted)
    assert np.allclose(causal.propensity['se'], se)
    assert_equal(set(causal.propensity.keys()), keys)
    assert np.allclose(causal.raw_data['pscore'], fitted)
def test_select_lin_terms():

	Y, D, X = random_data(K=4)
	X_c_random, X_t_random = X[D==0], X[D==1]

	lin1 = [3, 0, 1]
	C1 = np.inf
	ans1 = [3, 0, 1]
	assert_equal(p.select_lin_terms(X_c_random, X_t_random, lin1, C1), ans1)

	lin2 = [2]
	C2 = 0
	ans2 = [2, 0, 1, 3]
	assert_equal(p.select_lin_terms(X_c_random, X_t_random, lin2, C2), ans2)
	
	lin3 = []
	C3 = 0
	ans3 = [0, 1, 2, 3]
	assert_equal(p.select_lin_terms(X_c_random, X_t_random, lin3, C3), ans3)
	
	lin4 = [3, 1]
	C4 = -34.234
	ans4 = [3, 1, 0, 2]
	assert_equal(p.select_lin_terms(X_c_random, X_t_random, lin4, C4), ans4)

	X_c = np.array([[1, 2], [9, 7]])
	X_t = np.array([[1, 4], [9, 7]])

	lin5 = []
	C5 = 0.06
	ans5 = [1, 0]
	assert_equal(p.select_lin_terms(X_c, X_t, lin5, C5), ans5)
def test_select_lin():

	Y, D, X = random_data(K=4)
	X_c_random, X_t_random = X[D==0], X[D==1]

	lin1 = [0, 1, 2, 3]
	C1 = np.random.rand(1)
	ans1 = [0, 1, 2, 3]
	assert_equal(p.select_lin(X_c_random, X_t_random, lin1, C1), ans1)

	X_c = np.array([[1, 2], [9, 7]])
	X_t = np.array([[1, 4], [9, 6]])

	lin2 = []
	C2 = 0.07
	ans2 = []
	assert_equal(p.select_lin(X_c, X_t, lin2, C2), ans2)

	lin3 = []
	C3 = 0.06
	ans3 = [1, 0]
	assert_equal(p.select_lin(X_c, X_t, lin3, C3), ans3)

	lin4 = [1]
	C4 = 0.35
	ans4 = [1]
	assert_equal(p.select_lin(X_c, X_t, lin4, C4), ans4)

	lin5 = [1]
	C5 = 0.34
	ans5 = [1, 0]
	assert_equal(p.select_lin(X_c, X_t, lin5, C5), ans5)
def test_select_qua_terms():

	Y, D, X = random_data()
	X_c_random, X_t_random = X[D==0], X[D==1]

	lin1 = [0, 1]
	C1 = np.inf
	ans1 = []
	assert_equal(p.select_qua_terms(X_c_random, X_t_random, lin1, C1), ans1)

	lin2 = [1, 0]
	C2 = 0
	ans2 = [(1, 1), (1, 0), (0, 0)]
	assert_equal(p.select_qua_terms(X_c_random, X_t_random, lin2, C2), ans2)
	
	lin3 = [0]
	C3 = -983.340
	ans3 = [(0, 0)]
	assert_equal(p.select_qua_terms(X_c_random, X_t_random, lin3, C3), ans3)
	
	lin4 = []
	C4 = 34.234
	ans4 = []
	assert_equal(p.select_qua_terms(X_c_random, X_t_random, lin4, C4), ans4)

	X_c = np.array([[7, 8], [3, 10], [7, 10]])
	X_t = np.array([[4, 7], [5, 10], [9, 8]])

	lin5 = [0, 1]
	C5 = 1.1
	ans5 = [(1, 1), (0, 1), (0, 0)]
	assert_equal(p.select_qua_terms(X_c, X_t, lin5, C5), ans5)
Example #10
    def __init__(self,
                 timesheet=None,
                 date=date.today(),
                 data=None,
                 div_size=DIV_SIZE,
                 total_time=TOTAL_TIME,
                 start_time=0):
        """
        Inputs
        ------
            div_size: The size of the time divisions of a particular day, in
                minutes. A div_size that doesn't break the day into a whole
                number of minute divisions is rounded to the nearest div_size
                that does.
            data: The on/off light data provided by the microcontroller;
                'random' for random data, 'ones' for ones, default is zeros.
            date: The date of the data.
        """

        self.date = date
        self.div_size = u.get_whole_div(div_size, FACTORS)
        self.start_time = start_time
        self.total_time = total_time
        self.end_time = self.start_time + self.total_time - 5
        if isinstance(timesheet, pd.DataFrame):
            self.timesheet = timesheet
        else:
            if data == 'random':
                data = u.random_data(self.div_size)
            if data == 'ones':
                data = np.full(total_time // self.div_size, 1)
            self.timesheet = u.construct_dataframe(
                data, self.div_size, self.date.strftime('%Y-%m-%d'),
                self.total_time, self.start_time)
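
Here u.random_data(self.div_size) is only referenced, not defined. A plausible sketch, assuming it returns one random on/off reading per time division of the day (the total_time default and the 0/1 encoding are assumptions):

import numpy as np

def random_data(div_size, total_time=1440):
    # Hypothetical u.random_data: one random 0/1 light reading per
    # div_size-minute slot across total_time minutes (24 h by default).
    return np.random.randint(0, 2, size=total_time // div_size)
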
Example #11
def main(args):
    dNet = net.DigitRecNet()
    optimizer = optim.SGD(dNet.parameters(), lr=args.lr, momentum=0.5)
    criterion = torch.nn.NLLLoss()

    if not args.train:
        logging.info('-' * 50)
        logging.info('Start testing ... ')
        load_model(dNet, args.model_file, 'BestModel')
        logging.info('finish load model: %s' % args.model_file)
        test_x = utils.load_test_data(args.test_file, args.N, args.M)
        logging.info('Load test : %d' % len(test_x))
        test_input_x = Variable(torch.FloatTensor(test_x))
        test_input_x = test_input_x.resize(test_input_x.size()[0], 1, args.N,
                                           args.M)
        only_test(dNet, test_input_x, args.result_file)
        return

    train_x, train_y = utils.load_data(args.train_file, args.N, args.M)
    dev_x, dev_y = utils.load_data(args.dev_file, args.N, args.M)
    logging.info('-' * 50)
    logging.info('Load train : %d, Load dev : %d' % (len(train_x), len(dev_x)))

    #train
    logging.info('-' * 50)
    logging.info('Start training ... ')

    dev_input_x = Variable(torch.FloatTensor(dev_x))
    dev_input_x = dev_input_x.resize(dev_input_x.size()[0], 1, args.N, args.M)
    dev_pred_y = Variable(torch.LongTensor(dev_y))

    best_accuracy = 0
    for epoch_id in range(args.epoch):
        logging.info('Epoch : %d' % epoch_id)

        data = utils.random_data((train_x, train_y), args.batch_size)
        for it, (input_x, pred_y) in enumerate(data):
            input_x = Variable(torch.FloatTensor(input_x))
            input_x = input_x.resize(input_x.size()[0], 1, args.N, args.M)
            pred_y = Variable(torch.LongTensor(pred_y))
            assert input_x.size()[0] == pred_y.size()[0]

            optimizer.zero_grad()
            output_x = dNet(input_x)
            loss = criterion(output_x, pred_y)
            loss.backward()
            optimizer.step()

            logging.info('Iteration (%d) loss : %.6f' % (it, loss))

            if (it % args.iter_cnt == 0):
                tmp_accuracy = test(dNet, dev_input_x, dev_pred_y)
                if tmp_accuracy > best_accuracy:
                    best_accuracy = tmp_accuracy
                    save_model(dNet, epoch_id, args.model_file, 'Best')
                logging.info(
                    "Epoch : %d, Accuarcy : %.2f%%, Best Accuatcy : %.2f%%" %
                    (epoch_id, tmp_accuracy, best_accuracy))
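
In this example, utils.random_data((train_x, train_y), args.batch_size) yields shuffled mini-batches; the helper itself is not shown. A sketch consistent with how the loop above consumes it (the shuffling strategy and generator return type are assumptions):

import numpy as np

def random_data(dataset, batch_size):
    # Hypothetical utils.random_data: shuffle features and labels together
    # and yield (input_x, pred_y) mini-batches for one epoch.
    x, y = np.asarray(dataset[0]), np.asarray(dataset[1])
    order = np.random.permutation(len(x))
    for start in range(0, len(x), batch_size):
        idx = order[start:start + batch_size]
        yield x[idx], y[idx]
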
def test_select_qua():

	Y, D, X = random_data()
	X_c_random, X_t_random = X[D==0], X[D==1]

	lin1 = [1, 0]
	qua1 = [(1, 0), (0, 0), (1, 1)]
	C1 = np.random.rand(1)
	ans1 = [(1, 0), (0, 0), (1, 1)]
	assert_equal(p.select_qua(X_c_random, X_t_random, lin1, qua1, C1), ans1)

	lin2 = [1]
	qua2 = [(1, 1)]
	C2 = np.random.rand(1)
	ans2 = [(1, 1)]
	assert_equal(p.select_qua(X_c_random, X_t_random, lin2, qua2, C2), ans2)

	X_c = np.array([[7, 8], [3, 10], [7, 10]])
	X_t = np.array([[4, 7], [5, 10], [9, 8]])

	lin3 = [0, 1]
	qua3 = []
	C3 = 1.2
	ans3 = []
	assert_equal(p.select_qua(X_c, X_t, lin3, qua3, C3), ans3)

	lin4 = [0, 1]
	qua4 = []
	C4 = 1.1
	ans4 = [(1, 1), (0, 1), (0, 0)]
	assert_equal(p.select_qua(X_c, X_t, lin4, qua4, C4), ans4)

	lin5 = [0, 1]
	qua5 = [(1, 1)]
	C5 = 2.4
	ans5 = [(1, 1)]
	assert_equal(p.select_qua(X_c, X_t, lin5, qua5, C5), ans5)

	lin6 = [0, 1]
	qua6 = [(1, 1)]
	C6 = 2.3
	ans6 = [(1, 1), (0, 1), (0, 0)]
	assert_equal(p.select_qua(X_c, X_t, lin6, qua6, C6), ans6)

	lin7 = [0, 1]
	qua7 = [(1, 1), (0, 1)]
	C7 = 3.9
	ans7 = [(1, 1), (0, 1)]
	assert_equal(p.select_qua(X_c, X_t, lin7, qua7, C7), ans7)

	lin8 = [0, 1]
	qua8 = [(1, 1), (0, 1)]
	C8 = 3.8
	ans8 = [(1, 1), (0, 1), (0, 0)]
	assert_equal(p.select_qua(X_c, X_t, lin8, qua8, C8), ans8)
def test_select_qua():

    Y, D, X = random_data()
    X_c_random, X_t_random = X[D == 0], X[D == 1]

    lin1 = [1, 0]
    qua1 = [(1, 0), (0, 0), (1, 1)]
    C1 = np.random.rand(1)
    ans1 = [(1, 0), (0, 0), (1, 1)]
    assert_equal(p.select_qua(X_c_random, X_t_random, lin1, qua1, C1), ans1)

    lin2 = [1]
    qua2 = [(1, 1)]
    C2 = np.random.rand(1)
    ans2 = [(1, 1)]
    assert_equal(p.select_qua(X_c_random, X_t_random, lin2, qua2, C2), ans2)

    X_c = np.array([[7, 8], [3, 10], [7, 10]])
    X_t = np.array([[4, 7], [5, 10], [9, 8]])

    lin3 = [0, 1]
    qua3 = []
    C3 = 1.2
    ans3 = []
    assert_equal(p.select_qua(X_c, X_t, lin3, qua3, C3), ans3)

    lin4 = [0, 1]
    qua4 = []
    C4 = 1.1
    ans4 = [(1, 1), (0, 1), (0, 0)]
    assert_equal(p.select_qua(X_c, X_t, lin4, qua4, C4), ans4)

    lin5 = [0, 1]
    qua5 = [(1, 1)]
    C5 = 2.4
    ans5 = [(1, 1)]
    assert_equal(p.select_qua(X_c, X_t, lin5, qua5, C5), ans5)

    lin6 = [0, 1]
    qua6 = [(1, 1)]
    C6 = 2.3
    ans6 = [(1, 1), (0, 1), (0, 0)]
    assert_equal(p.select_qua(X_c, X_t, lin6, qua6, C6), ans6)

    lin7 = [0, 1]
    qua7 = [(1, 1), (0, 1)]
    C7 = 3.9
    ans7 = [(1, 1), (0, 1)]
    assert_equal(p.select_qua(X_c, X_t, lin7, qua7, C7), ans7)

    lin8 = [0, 1]
    qua8 = [(1, 1), (0, 1)]
    C8 = 3.8
    ans8 = [(1, 1), (0, 1), (0, 0)]
    assert_equal(p.select_qua(X_c, X_t, lin8, qua8, C8), ans8)
Example #14
def test_propensity():
    import causalinference.core.data as d
    import causalinference.core.propensity as p
    from utils import random_data

    D = np.array([0, 0, 0, 1, 1, 1])
    X = np.array([[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]])
    Y = random_data(D_cur=D, X_cur=X)
    print(Y)

    data = d.Data(Y, D, X)
    propensity = p.Propensity(data, [0, 1], [])
    print(propensity)
Example #15
def test_propensity():
	import causalinference.core.data as d
	import causalinference.core.propensity as p
	from utils import random_data


	D = np.array([0, 0, 0, 1, 1, 1])
	X = np.array([[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]])
	Y = random_data(D_cur=D, X_cur=X)
	print(Y)

	data = d.Data(Y, D, X)
	propensity = p.Propensity(data, [0, 1], [])
	print(propensity)
Example #16
def test_est_propensity_s():

    D = np.array([0, 0, 0, 1, 1, 1])
    X = np.array([[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]])
    Y = random_data(D_cur=D, X_cur=X)
    causal = c.CausalModel(Y, D, X)

    causal.est_propensity_s()
    lin1 = [1]
    qua1 = []
    coef1 = np.array([6.5424027, -0.7392041])
    loglike1 = -3.627939
    fitted1 = np.array(
        [0.6522105, 0.2995088, 0.2995088, 0.7970526, 0.2995088, 0.6522105])
    se1 = np.array([6.8455179, 0.7641445])
    keys = {'lin', 'qua', 'coef', 'loglike', 'fitted', 'se'}

    assert_equal(causal.propensity['lin'], lin1)
    assert_equal(causal.propensity['qua'], qua1)
    assert np.allclose(causal.propensity['coef'], coef1)
    assert np.allclose(causal.propensity['loglike'], loglike1)
    assert np.allclose(causal.propensity['fitted'], fitted1)
    assert np.allclose(causal.propensity['se'], se1)
    assert_equal(set(causal.propensity.keys()), keys)
    assert np.allclose(causal.raw_data['pscore'], fitted1)

    causal.est_propensity_s([0, 1])
    lin2 = [0, 1]
    qua2 = []
    coef2 = np.array([6.8066090, -0.0244874, -0.7524939])
    loglike2 = -3.626517
    fitted2 = np.array(
        [0.6491366, 0.3117840, 0.2911631, 0.8086407, 0.3013733, 0.6379023])
    se2 = np.array([8.5373779, 0.4595191, 0.8106499])

    assert_equal(causal.propensity['lin'], lin2)
    assert_equal(causal.propensity['qua'], qua2)
    assert np.allclose(causal.propensity['coef'], coef2)
    assert np.allclose(causal.propensity['loglike'], loglike2)
    assert np.allclose(causal.propensity['fitted'], fitted2)
    assert np.allclose(causal.propensity['se'], se2)
    assert np.allclose(causal.raw_data['pscore'], fitted2)
Example #17
def test_est_propensity_s():

	D = np.array([0, 0, 0, 1, 1, 1])
	X = np.array([[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]])
	Y = random_data(D_cur=D, X_cur=X)
	causal = c.CausalModel(Y, D, X)

	causal.est_propensity_s()
	lin1 = [1]
	qua1 = []
	coef1 = np.array([6.5424027, -0.7392041])
	loglike1 = -3.627939
	fitted1 = np.array([0.6522105, 0.2995088, 0.2995088,
	                   0.7970526, 0.2995088, 0.6522105])
	se1 = np.array([6.8455179, 0.7641445])
	keys = {'lin', 'qua', 'coef', 'loglike', 'fitted', 'se'}
	
	assert_equal(causal.propensity['lin'], lin1)
	assert_equal(causal.propensity['qua'], qua1)
	assert np.allclose(causal.propensity['coef'], coef1)
	assert np.allclose(causal.propensity['loglike'], loglike1)
	assert np.allclose(causal.propensity['fitted'], fitted1)
	assert np.allclose(causal.propensity['se'], se1)
	assert_equal(set(causal.propensity.keys()), keys)
	assert np.allclose(causal.raw_data['pscore'], fitted1)

	causal.est_propensity_s([0,1])
	lin2 = [0, 1]
	qua2 = []
	coef2 = np.array([6.8066090, -0.0244874, -0.7524939])
	loglike2 = -3.626517
	fitted2 = np.array([0.6491366, 0.3117840, 0.2911631,
	                    0.8086407, 0.3013733, 0.6379023])
	se2 = np.array([8.5373779, 0.4595191, 0.8106499])

	assert_equal(causal.propensity['lin'], lin2)
	assert_equal(causal.propensity['qua'], qua2)
	assert np.allclose(causal.propensity['coef'], coef2)
	assert np.allclose(causal.propensity['loglike'], loglike2)
	assert np.allclose(causal.propensity['fitted'], fitted2)
	assert np.allclose(causal.propensity['se'], se2)
	assert np.allclose(causal.raw_data['pscore'], fitted2)
Example #18
def causal_ATE():
    from causalinference import CausalModel
    from utils import random_data

    D = np.array([0, 0, 0, 1, 1, 1])
    X = np.array([[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]])
    Y = random_data(D_cur=D, X_cur=X)
    print(Y)

    causal = CausalModel(Y, D, X)
    #causal.est_via_ols()
    #print causal.estimates

    causal.est_propensity_s()
    print(causal.propensity)
    # -*- coding: utf-8 -*-
    # You can do the matching yourself based on the propensity scores.

    # estimated propensity scores
    print(causal.propensity['fitted'])
Example #19
def causal_ATE():
	from causalinference import CausalModel
	from utils import random_data

	D = np.array([0, 0, 0, 1, 1, 1])
	X = np.array([[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]])
	Y = random_data(D_cur=D, X_cur=X)
	print(Y)

	causal = CausalModel(Y, D, X)
	# causal.est_via_ols()
	# print(causal.estimates)

	causal.est_propensity_s()
	print(causal.propensity)
	# -*- coding: utf-8 -*-
	# You can do the matching yourself based on the propensity scores.

	# estimated propensity scores
	print(causal.propensity['fitted'])
def test_propensityselect():

	D = np.array([0, 0, 0, 1, 1, 1])
	X = np.array([[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]])
	Y = random_data(D_cur=D, X_cur=X)
	data = d.Data(Y, D, X)

	propensity1 = p.PropensitySelect(data, [], 1, 2.71)
	lin1 = [1]
	qua1 = []
	coef1 = np.array([6.5424027, -0.7392041])
	loglike1 = -3.627939
	fitted1 = np.array([0.6522105, 0.2995088, 0.2995088,
	                   0.7970526, 0.2995088, 0.6522105])
	se1 = np.array([6.8455179, 0.7641445])
	keys = {'lin', 'qua', 'coef', 'loglike', 'fitted', 'se'}
	
	assert_equal(propensity1['lin'], lin1)
	assert_equal(propensity1['qua'], qua1)
	assert np.allclose(propensity1['coef'], coef1)
	assert np.allclose(propensity1['loglike'], loglike1)
	assert np.allclose(propensity1['fitted'], fitted1)
	assert np.allclose(propensity1['se'], se1)
	assert_equal(set(propensity1.keys()), keys)


	propensity2 = p.PropensitySelect(data, [0, 1], 1, 2.71)
	lin2 = [0, 1]
	qua2 = []
	coef2 = np.array([6.8066090, -0.0244874, -0.7524939])
	loglike2 = -3.626517
	fitted2 = np.array([0.6491366, 0.3117840, 0.2911631,
	                    0.8086407, 0.3013733, 0.6379023])
	se2 = np.array([8.5373779, 0.4595191, 0.8106499])

	assert_equal(propensity2['lin'], lin2)
	assert_equal(propensity2['qua'], qua2)
	assert np.allclose(propensity2['coef'], coef2)
	assert np.allclose(propensity2['loglike'], loglike2)
	assert np.allclose(propensity2['fitted'], fitted2)
	assert np.allclose(propensity2['se'], se2)
def test_propensityselect():

    D = np.array([0, 0, 0, 1, 1, 1])
    X = np.array([[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]])
    Y = random_data(D_cur=D, X_cur=X)
    data = d.Data(Y, D, X)

    propensity1 = p.PropensitySelect(data, [], 1, 2.71)
    lin1 = [1]
    qua1 = []
    coef1 = np.array([6.5424027, -0.7392041])
    loglike1 = -3.627939
    fitted1 = np.array(
        [0.6522105, 0.2995088, 0.2995088, 0.7970526, 0.2995088, 0.6522105])
    se1 = np.array([6.8455179, 0.7641445])
    keys = {'lin', 'qua', 'coef', 'loglike', 'fitted', 'se'}

    assert_equal(propensity1['lin'], lin1)
    assert_equal(propensity1['qua'], qua1)
    assert np.allclose(propensity1['coef'], coef1)
    assert np.allclose(propensity1['loglike'], loglike1)
    assert np.allclose(propensity1['fitted'], fitted1)
    assert np.allclose(propensity1['se'], se1)
    assert_equal(set(propensity1.keys()), keys)

    propensity2 = p.PropensitySelect(data, [0, 1], 1, 2.71)
    lin2 = [0, 1]
    qua2 = []
    coef2 = np.array([6.8066090, -0.0244874, -0.7524939])
    loglike2 = -3.626517
    fitted2 = np.array(
        [0.6491366, 0.3117840, 0.2911631, 0.8086407, 0.3013733, 0.6379023])
    se2 = np.array([8.5373779, 0.4595191, 0.8106499])

    assert_equal(propensity2['lin'], lin2)
    assert_equal(propensity2['qua'], qua2)
    assert np.allclose(propensity2['coef'], coef2)
    assert np.allclose(propensity2['loglike'], loglike2)
    assert np.allclose(propensity2['fitted'], fitted2)
    assert np.allclose(propensity2['se'], se2)
Example #22
def test_propensity():

	D = np.array([0, 0, 0, 1, 1, 1])
	X = np.array([[7, 8], [3, 10], [7, 10], [4, 7], [5, 10], [9, 8]])
	Y = random_data(D_cur=D, X_cur=X)

	data = d.Data(Y, D, X)
	propensity = p.Propensity(data, [0, 1], [])
	lin = [0, 1]
	qua = []
	coef = np.array([6.8066090, -0.0244874, -0.7524939])
	loglike = -3.626517
	fitted = np.array([0.6491366, 0.3117840, 0.2911631,
	                   0.8086407, 0.3013733, 0.6379023])
	se = np.array([8.5373779, 0.4595191, 0.8106499])
	keys = {'lin', 'qua', 'coef', 'loglike', 'fitted', 'se'}
	
	assert_equal(propensity['lin'], lin)
	assert_equal(propensity['qua'], qua)
	assert np.allclose(propensity['coef'], coef)
	assert np.allclose(propensity['loglike'], loglike)
	assert np.allclose(propensity['fitted'], fitted)
	assert np.allclose(propensity['se'], se)
	assert_equal(set(propensity.keys()), keys)
Example #23
    def test_invite(self):
        """

        """
        self.ip.src = utils.random_ip()
        self.udp.sport = utils.random_port()

        self.message.uri = 'sip:{0}@{1}'.format(utils.random_data(20),
                                                utils.random_data(15))
        self.message.method = 'INVITE'
        self.message.headers = {
            'Call-ID':
            utils.random_tag(),
            'CSeq':
            '0 INVITE',
            'From':
            '"{0}" <sip:{1}@{2}>;tag={3}'.format(utils.random_data(10),
                                                 utils.random_data(20),
                                                 utils.random_data(15),
                                                 utils.random_tag()),
            'Max-Forwards':
            '{0}'.format(utils.random_number(2)),
            'To':
            '<sip:{0}@{1}>'.format(utils.random_data(20),
                                   utils.random_data(15)),
            'Via':
            'SIP/2.0/UDP {0}:{1};branch={2};rport'.format(
                utils.random_ip(), utils.random_port(), utils.random_tag()),
            'Content-Length':
            '0',
            'User-Agent':
            '{0}'.format(utils.random_data(30)),
            'Contact':
            '<sip:{0}@{1}:{2};transport=UDP>;'
            'q=1.00;agentid="{3}";'
            'methods="INVITE,NOTIFY,MESSAGE,ACK,BYE,CANCEL";'
            'expires={4}'.format(utils.random_data(20), utils.random_ip(),
                                 utils.random_data(20), utils.random_tag(),
                                 utils.random_number(2)),
            'Authorization':
            'Digest username="******", '
            'realm="{2}", '
            'nonce="{3}", '
            'uri="sip:{4}", '
            'qop=auth, nc=00000001, '
            'cnonce="{5}", '
            'response="{6}", '
            'opaque=""'.format(utils.random_data(20), utils.random_data(15),
                               utils.random_data(15), utils.random_tag(),
                               utils.random_data(15), utils.random_tag(),
                               utils.random_data(32))
        }
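
Example #23 uses utils.random_data(n) to fill SIP header fields with random text of length n; the helper itself is not included here. A minimal sketch under that assumption (the alphabet choice is arbitrary):

import random
import string

def random_data(length):
    # Hypothetical utils.random_data: a random alphanumeric string of the
    # requested length, used to populate SIP header fields.
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
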
Example #24
train_writer = tf.summary.FileWriter(savepath + 'train', sess.graph)
test_writer = tf.summary.FileWriter(savepath + 'test')
""" 
training session

"""

weight = z_c_mean.get_shape().as_list()[1] * z_c_mean.get_shape().as_list(
)[2]  # this is to normalize the loss shown in the terminal against the receptive field

for step in range(start_step, start_step + training_step):
    if step % 20000 == 0 or step == start_step:
        print('Starting @step %s' % step)

    batch = random_data(MRtrain,
                        batchsize=batchsize)[:batchsize, 22:22 + imageshape[0],
                                             17:17 + imageshape[1]]
    batch = np.expand_dims(batch, axis=-1)

    t_los, p_los, con_los, w_los, c_los, _ = sess.run(
        [t_loss, p_loss, con_loss, w_loss, c_loss, optimization],
        feed_dict={x: batch.reshape(batchsize, -1)})

    if step == 0 or os.path.isdir(savepath + 'model') == 0:

        os.makedirs(os.path.join(savepath, 'model', ''))

    if step % 5000 == 0 and step >= start_step or step == start_step + training_step - 1:

        model_path = saver.save(sess,
                                savepath + 'model/model.ckpt',