def __init__(self, sc):
    # load the raw datasets used by the downstream pipeline
    path_GDP = 'ml/GDP.txt'
    self.GDP = function.load(path_GDP)
    path_income = 'ml/ny_income_vacanhouse.csv'
    self.income = function.load(path_income)
    path_sale = 'ml/prepared.csv'
    self.sale = function.load(path_sale)
    path_food = 'ml/Retail_Food_Stores.csv'
    self.food = function.load(path_food)
    path_sub = 'processed/subway_station.csv'
    self.csv_sub = function.load(path_sub)
    # keep the SparkContext for later transformations
    self.sc = sc
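
A minimal sketch of the function.load helper used above, assuming it simply wraps Spark's CSV reader; the options shown (header row, schema inference, a shared SparkSession) are assumptions, not the project's actual implementation:

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

def load(path):
    # Read a CSV file into a Spark DataFrame (options are assumed defaults).
    return (spark.read
            .option("header", "true")
            .option("inferSchema", "true")
            .csv(path))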
Example 2
import numpy as np

import F  # project helper module providing load/divide/test (assumed from context)


class PredFunc(object):
    """Per-class Gaussian naive Bayes score."""

    def __init__(self, miu, sigma, Pc, label):
        self.miu = miu      # per-feature class mean
        self.sigma = sigma  # per-feature class standard deviation
        self.label = label
        self.Pc = Pc        # class prior P(c)

    def cal(self, x):
        # P(c) * prod_i N(x_i | miu_i, sigma_i^2), with the Gaussian density
        # exp(-(x - mu)^2 / (2*sigma^2)) / (sqrt(2*pi) * sigma)
        return self.Pc * np.prod(
            np.exp(-(x - self.miu)**2 / (2 * self.sigma**2)) /
            (np.sqrt(2 * np.pi) * self.sigma))


def train(trainData, trainLabel):
    """Fit one Gaussian per class; return a scoring function per class."""
    func = []
    num = len(trainLabel)
    # F.divide groups the training samples by class label
    newData, newLabel = F.divide(trainData, trainLabel)
    for C, y in zip(newData, newLabel):
        Pc = len(C) / num             # class prior
        miu = np.average(C, axis=0)   # per-feature mean
        sigma = np.std(C, axis=0)     # per-feature standard deviation (cal squares it)
        func.append(PredFunc(miu, sigma, Pc, y))
    return func


normalize = True  # assumed flag: whether F.load should scale the features
trainData, trainLabel, _ = F.load(0, normalize)  # split 0: training set
testData, testLabel, _ = F.load(1, normalize)    # split 1: test set
func = train(trainData, trainLabel)
acc = F.test(testData, testLabel, func)
print(acc)
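
F.divide is not shown in this example; a hedged sketch of the grouping it appears to perform, assuming NumPy arrays and that it returns one array of samples per distinct label:

import numpy as np

def divide(trainData, trainLabel):
    # Group the rows of trainData by label; returns (list of per-class
    # arrays, array of the corresponding labels).
    labels = np.unique(trainLabel)
    grouped = [trainData[trainLabel == y] for y in labels]
    return grouped, labels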
Example 3
import numpy as np
import torch
import torch.nn as nn

import func  # project helpers: load/input_change/get_move/do_move/to_Variable (assumed)

# m, k, size, resize, r, center, recenter and the circle* arrays are
# module-level configuration defined elsewhere in the project.


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # m input channels, 1 output channel, 13x13 kernel, stride 1,
        # no padding, dilation 3
        self.conv1 = nn.Conv2d(m, 1, 13, 1, 0, 3)

    def forward(self, x, y):
        x = self.conv1(x)
        return x * (1 - y)  # mask the convolved map with (1 - y)
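
A quick shape check for Net, as a hedged example: m is assumed to be 3 here, and x and y are placeholders sized so that y matches conv1's output. With kernel 13 and dilation 3 the effective receptive field is (13 - 1) * 3 + 1 = 37, so a 64x64 input yields a 64 - 37 + 1 = 28 pixel map:

m = 3  # assumed channel count, only for this check
net = Net()
x = torch.randn(1, m, 64, 64)   # one m-channel 64x64 image
y = torch.zeros(1, 1, 28, 28)   # mask matching conv1's output size
print(net(x, y).shape)          # torch.Size([1, 1, 28, 28])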


if __name__ == '__main__':
    # set up network
    net = Net()
    for run in np.arange(1, 239 - k):
        data = func.load(run, run + 2 * k + m, size)
        rate = 0.00005
        criterion = nn.MSELoss()
        optimizer = torch.optim.SGD(net.parameters(), lr=rate, momentum=0.9)
        # prepare input and label
        input = func.input_change(data[0:m, :, :], resize, circle477,
                                  circle513, r)
        label = data[m + k - 1, :, :]
        mov_array = func.get_move(input, label)
        input = func.do_move(input, mov_array, circle513_resize)
        label = func.to_Variable(label)
        # baseline 1: last observed frame vs. the label (persistence forecast)
        base1 = criterion(func.to_Variable(data[m - 1, :, :]),
                          label).data.numpy()[0]
        # baseline 2: motion-compensated frame cropped to the center window
        base2 = criterion(
            func.to_Variable(input[-1, recenter - center:recenter + center + 1,
                                   recenter - center:recenter + center + 1]),
            label).data.numpy()[0]
Example 4
import function
from pyspark import SparkConf, SparkContext
import pyspark.sql.functions as f
sc = SparkContext()
sc.setLogLevel("ERROR")

# process table ny_subway_station:
path_sub = 'processed/subway_station.csv'
csv_sub = function.load(path_sub)
# convert (longitude, latitude) pairs to ZIP codes
csv_sub1 = function.zipcode(csv_sub, sc)
csv_sub3 = function.subway_route(csv_sub1)
# save the table into PostgreSQL
table_subway = 'subway'
function.save(csv_sub3, table_subway)
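
function.save is defined elsewhere; a minimal sketch of what it might do, assuming it writes a Spark DataFrame to PostgreSQL over JDBC (the URL, credentials, and write mode here are placeholders, not the project's real settings):

def save(df, table):
    # write a Spark DataFrame to PostgreSQL over JDBC (settings assumed)
    (df.write
       .format("jdbc")
       .option("url", "jdbc:postgresql://localhost:5432/nyc")
       .option("dbtable", table)
       .option("driver", "org.postgresql.Driver")
       .option("user", "postgres")
       .option("password", "postgres")
       .mode("overwrite")
       .save())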

# process table ny_citibike_station:
path_bike = 'processed/citybike_station'
csv = function.load(path_bike)
csv_bike1 = function.zipcode(csv, sc)
csv_bike = function.citybike_station_count(csv_bike1)

# loading rolling sale and crime data:
path_sale_crime = 'processed/prepared.csv'
csv_sale_crime = function.load(path_sale_crime)

# compute the average sale price per square foot from rolling-sale records after 2013
sale = function.avg_price_per_square_feet(csv_sale_crime)
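
The aggregation inside avg_price_per_square_feet is not shown; a hedged sketch of one plausible implementation using the pyspark.sql.functions import above (the column names are assumptions):

def avg_price_per_square_feet(df):
    # assumed columns: zipcode, sale_price, gross_square_feet, sale_year
    return (df.filter(f.col("sale_year") > 2013)
              .filter(f.col("gross_square_feet") > 0)
              .groupBy("zipcode")
              .agg(f.avg(f.col("sale_price") /
                         f.col("gross_square_feet")).alias("avg_price_per_sqft")))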

# load the population dataset to calculate a crime index
path_population = 'processed/census_population'
Example 5
import numpy as np

import F  # project helper module providing load/divide/test (assumed from context)


class PredFunc(object):
    """Per-class Gaussian discriminant with a single scalar variance."""

    def __init__(self, ave, sigma, pi, label):
        self.ave = ave        # class mean vector
        self.sigma = sigma    # scalar variance shared across features
        self.pi = pi          # class prior
        self.label = label

    def cal(self, x):
        # class log-likelihood up to an additive constant:
        # -||x - ave||^2 / (2*sigma) - 0.5*log|sigma| + log(pi)
        return -0.5 / self.sigma * np.dot(
            (x - self.ave).T, x - self.ave) - 0.5 * np.log(np.abs(
                self.sigma)) + np.log(self.pi)


def train(trainData, trainLabel):
    func = []
    num = len(trainLabel)
    newData, newLabel = F.divide(trainData, trainLabel)  # group samples by class
    for C, y in zip(newData, newLabel):
        ave = np.average(C, axis=0)  # class mean
        pi = len(C) / num            # class prior
        sigma = np.var(C)            # one scalar variance over all features
        func.append(PredFunc(ave, sigma, pi, y))
    return func


trainData, trainLabel, _ = F.load(0, True)  # split 0: training set (True: normalize, cf. Example 2)
testData, testLabel, _ = F.load(1, True)    # split 1: test set
func = train(trainData, trainLabel)
acc = F.test(testData, testLabel, func)
print(acc)
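
Unlike Example 2, which multiplies raw densities, this version scores in the log domain. A toy illustration of why that matters numerically (values chosen only to show the underflow):

import numpy as np

dens = np.full(500, 1e-3)     # 500 small per-feature densities
print(np.prod(dens))          # 0.0 -- the product underflows float64
print(np.sum(np.log(dens)))   # about -3453.9 -- still usable for argmax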
Example 6
# imports assumed by this snippet
import logging
import time

import tensorflow as tf
import tensorlayer as tl

import func      # project data-loading helpers (assumed)
import network   # generator/discriminator definitions (assumed)


def train():
    global image_size, batch_size, lr_init, beta1, n_epoch_init, n_epoch, lr_decay, decay_round
    global save_step, checkpoint_path
    save_cnt = 0
    tl.files.exists_or_mkdir(checkpoint_path)

    image_gray = tf.placeholder(dtype=tf.float32,
                                shape=[batch_size, image_size, image_size, 1],
                                name="image_gray")
    image_color = tf.placeholder(dtype=tf.float32,
                                 shape=[batch_size, image_size, image_size, 3],
                                 name="image_color")
    """GAN's train inference"""
    net_g = network.network_g(image_gray=image_gray,
                              is_train=True,
                              reuse=False)
    d_input_real = tf.concat([image_gray, image_color], axis=3)
    d_input_fake = tf.concat([image_gray, net_g.outputs * 255], axis=3)
    net_d, logits_real = network.network_d(image_input=d_input_real,
                                           is_train=True,
                                           reuse=False)
    _, logits_fake = network.network_d(image_input=d_input_fake,
                                       is_train=True,
                                       reuse=True)
    """VGG's inference"""
    fake_224 = tf.image.resize_images(net_g.outputs, size=[224, 224], method=0)
    real_224 = tf.image.resize_images(image_color, size=[224, 224], method=0)
    """loss"""
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        d_loss_1 = tl.cost.sigmoid_cross_entropy(
            logits_real.outputs, tf.ones_like(logits_real.outputs))
        d_loss_2 = tl.cost.sigmoid_cross_entropy(
            logits_fake.outputs, tf.zeros_like(logits_fake.outputs))
        D_loss = d_loss_1 + d_loss_2
        g_gan_loss = tl.cost.sigmoid_cross_entropy(
            logits_fake.outputs, tf.ones_like(logits_fake.outputs))
        g_mse_loss = tf.reduce_mean(
            tf.losses.mean_squared_error(image_color, net_g.outputs * 255))
        G_loss = g_gan_loss + g_mse_loss
        """train op"""
        G_var = tl.layers.get_variables_with_name("network_g",
                                                  train_only=True,
                                                  printable=False)
        D_var = tl.layers.get_variables_with_name("network_d",
                                                  train_only=True,
                                                  printable=False)
        with tf.variable_scope('learn_rate'):
            lr_v = tf.Variable(lr_init, trainable=False)
        G_init_optimizer = tf.train.AdadeltaOptimizer(lr_v).minimize(
            g_mse_loss, var_list=G_var)
        D_optimizer = tf.train.AdadeltaOptimizer(lr_v).minimize(D_loss,
                                                                var_list=D_var)
        G_optimizer = tf.train.AdadeltaOptimizer(lr_v).minimize(G_loss,
                                                                var_list=G_var)
    """train"""
    with tf.Session() as sess:
        tl.layers.initialize_global_variables(sess)

        for epoch in range(n_epoch_init):
            img_list = func.init_list(image_size)
            epoch_time = time.time()
            n_iter, total_g_loss = 0, 0
            for idx in range(0, total, batch_size):
                step_time = time.time()
                if idx + batch_size > total:
                    break
                input_gray, input_color = func.load(size=image_size,
                                                    start=idx,
                                                    number=batch_size,
                                                    img_list=img_list)
                errG, _ = sess.run([g_mse_loss, G_init_optimizer],
                                   feed_dict={
                                       image_gray: input_gray,
                                       image_color: input_color
                                   })
                print "[TF] Epoch [%2d/%2d] %4d  time: %4.4fs, g_loss: %.8f" % (
                    epoch, n_epoch_init, n_iter, time.time() - step_time, errG)
                total_g_loss += errG
                n_iter += 1
            log = "[*] Epoch: [%2d/%2d] time: %4.4fs, g_loss: %.8f" % (
                epoch, n_epoch_init, time.time() - epoch_time,
                total_g_loss / n_iter)
            logging.info(log)

        for epoch in range(n_epoch - n_epoch_init):
            if epoch != 0 and (epoch % decay_round == 0):
                new_lr_decay = lr_decay**(epoch // decay_round)
                sess.run(tf.assign(lr_v, lr_init * new_lr_decay))
                log = "[*] Epoch:[%2d/%2d] new learning rate: %f (for GAN)" % (
                    epoch, n_epoch - n_epoch_init, lr_init * new_lr_decay)
                logging.info(log)
            elif epoch == 0:
                sess.run(tf.assign(lr_v, lr_init))
                log = "[%s] init lr: %f  decay_every_init: %d, lr_decay: %f (for GAN)" % (
                    time.ctime(), lr_init, decay_round, lr_decay)
                logging.info(log)

            img_list = func.init_list(image_size)
            n_iter, total_d_loss, total_g_loss, total_fake_loss = 0, 0, 0, 0
            epoch_time = time.time()
            for idx in range(0, total, batch_size):
                step_time = time.time()
                if idx + batch_size > total:
                    break
                input_gray, input_color = func.load(size=image_size,
                                                    start=idx,
                                                    number=batch_size,
                                                    img_list=img_list)
                # Note: duplicate fetches in a single sess.run are collapsed,
                # so G_optimizer runs once per step regardless of how many
                # times it is listed; extra generator updates would need
                # separate sess.run calls.
                errG, errD, _, _, d_fake_loss = sess.run(
                    [G_loss, D_loss, G_optimizer, D_optimizer, d_loss_2],
                    feed_dict={
                        image_gray: input_gray,
                        image_color: input_color
                    })
                print "[TF] Epoch [%2d/%2d] %4d  time: %4.4fs, d_loss: %.8f(fake_loss: %.8f) g_loss: %.8f" % (
                    epoch, n_epoch - n_epoch_init, n_iter,
                    time.time() - step_time, errD, d_fake_loss, errG)
                total_d_loss += errD
                total_g_loss += errG
                total_fake_loss += d_fake_loss
                n_iter += 1
            log = "[%s] Epoch: [%2d/%2d] time: %4.4fs, d_loss: %.8f(fake_loss: %.8f) g_loss: %.8f" % (
                time.ctime(), epoch, n_epoch - n_epoch_init,
                time.time() - epoch_time, total_d_loss / n_iter,
                total_fake_loss / n_iter, total_g_loss / n_iter)
            logging.info(log)

            if epoch != 0 and (epoch + 1) % save_step == 0:
                log = "[%s] epoch %d, save as %s/g_%d.npz" % (
                    time.ctime(), epoch, checkpoint_path, save_cnt % 10)
                logging.info(log)
                tl.files.save_npz(net_g.all_params,
                                  name="%s/g_%d.npz" %
                                  (checkpoint_path, save_cnt % 10),
                                  sess=sess)
                tl.files.save_npz(net_d.all_params,
                                  name="%s/d_%d.npz" %
                                  (checkpoint_path, save_cnt % 10),
                                  sess=sess)
                save_cnt += 1
            else:
                print("[*] no checkpoint this epoch, path=%s" % checkpoint_path)