Example #1
File: Loop.py Project: apd10/core
import numpy as np
from torch.autograd import Variable
from tqdm import tqdm
# Data, Model, Optimizer, Loss and ProgressEvaluator are project-local
# modules from apd10/core.


class Loop:
    def __init__(self, params):
        self.device_id = params["device_id"]
        self.epochs = params["epochs"]
        # data
        self.train_data = Data(params["train_data"])
        #self.test_data = Data(params["test_data"])
        #self.validation_data = Data(params["validation_data"])
        #self.progress_train_data = Data(params["progress_train_data"])
        self.progress_train_data = None
        self.progress_test_data = Data(params["progress_test_data"])
        # model
        self.model = Model.get(params["model"])
        print(self.model)
        if self.device_id != -1:
            self.model = self.model.cuda(self.device_id)
        # optimizer
        self.optimizer = Optimizer.get(self.model, params["optimizer"])
        # loss
        self.loss_func = Loss.get(params["loss"])
        #if self.device_id != -1:
        #  self.loss_func = self.loss_func.cuda(self.device_id)
        # progress evaluator
        self.progress_evaluator = ProgressEvaluator.get(
            params["progress_evaluator"], self.progress_train_data,
            self.progress_test_data, self.device_id)

    def loop(self):
        epoch = 0
        iteration = 0

        while epoch < self.epochs:
            self.train_data.reset()
            num_samples = self.train_data.len()
            batch_size = self.train_data.batch_size()
            num_batches = int(np.ceil(num_samples / batch_size))
            loc_itr = 0
            for i in tqdm(range(num_batches)):
                if self.train_data.end():
                    break
                self.model.train()
                self.optimizer.zero_grad()
                x, y = self.train_data.next()
                x = Variable(x)
                y = Variable(y)
                if self.device_id != -1:
                    x = x.cuda(self.device_id)
                    y = y.cuda(self.device_id)
                output = self.model(x)
                loss = self.loss_func(output, y)
                loss.backward()
                self.optimizer.step()
                self.progress_evaluator.evaluate(epoch, loc_itr, iteration,
                                                 self.model, self.loss_func)
                iteration += 1
                loc_itr += 1
                #print("Loss", loss)
            epoch += 1
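
A minimal usage sketch for Loop. Only the top-level keys are taken from __init__ above; the nested sub-configs are placeholders whose real schemas are defined by the project's Data, Model, Optimizer, Loss and ProgressEvaluator factories.

params = {
    "device_id": 0,            # -1 keeps everything on the CPU
    "epochs": 10,
    "train_data": {},          # placeholder, consumed by Data(...)
    "progress_test_data": {},  # placeholder, consumed by Data(...)
    "model": {},               # placeholder, consumed by Model.get(...)
    "optimizer": {},           # placeholder, consumed by Optimizer.get(...)
    "loss": {},                # placeholder, consumed by Loss.get(...)
    "progress_evaluator": {},  # placeholder, consumed by ProgressEvaluator.get(...)
}
loop = Loop(params)
loop.loop()  # runs the train/evaluate cycle for params["epochs"] epochs
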
Example #2
File: Stats.py Project: apd10/core
import numpy as np
import torch
from tqdm import tqdm
# Data is a project-local module from apd10/core.


class Stats:
    def __init__(self, params):
        self.device_id = params["device_id"]
        self.data = Data(params["data"])
        self.stat_file = params["stat_file"]

    def run(self):
        device_id = self.device_id
        num_samples = self.data.len()
        batch_size = self.data.batch_size()
        num_batches = int(np.ceil(num_samples / batch_size))
        loc_itr = 0
        mean = None
        mean_sum = None
        var = None
        var_sum = None

        self.data.reset()
        for i in tqdm(range(num_batches)):
            if self.data.end():
                break
            x, y = self.data.next()
            if device_id != -1:  # guard the CPU case, as in Loop.loop above
                x = x.cuda(device_id)
            xsum = torch.sum(x, dim=0)
            if mean_sum is None:
                mean_sum = xsum
            else:
                mean_sum += xsum

        mean = mean_sum / num_samples

        self.data.reset()
        for i in tqdm(range(num_batches)):
            if self.data.end():
                break
            x, y = self.data.next()
            if device_id != -1:
                x = x.cuda(device_id)
            xcent = x - mean  # center with the mean from the first pass

            xsum = torch.sum(torch.mul(xcent, xcent), dim=0)  # per-feature squared deviations
            if var_sum is None:
                var_sum = xsum
            else:
                var_sum += xsum

        var = var_sum / num_samples
        std = torch.sqrt(var)

        mean = np.array(mean.cpu())
        std = np.array(std.cpu())
        np.savez_compressed(self.stat_file, mu=mean, std=std)
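
A minimal usage sketch for Stats, assuming the "data" sub-config follows whatever schema the project's Data class expects. The two-pass approach above (sum first, then centered squared sums) avoids the numerical cancellation of the naive E[x^2] - E[x]^2 formula at the cost of a second pass over the data.

stats = Stats({
    "device_id": 0,        # -1 keeps everything on the CPU
    "data": {},            # placeholder, consumed by Data(...)
    "stat_file": "stats.npz",
})
stats.run()

# The saved arrays can later be reloaded, e.g. for feature normalization:
arrays = np.load("stats.npz")
mu, std = arrays["mu"], arrays["std"]
# x_normalized = (x - mu) / std
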
# AutoEncoder 1
# TF1-style graph code; `ae1` (the autoencoder model), `data` (the dataset
# wrapper) and the tensors `y`/`output` are defined earlier in the original
# script.
import numpy as np
import tensorflow as tf
stackcost = tf.reduce_mean(tf.square(tf.subtract(y, output)))
opt = tf.train.AdamOptimizer(0.185).minimize(stackcost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Train AutoEncoder1
for epoch in range(200):
    avg_cost = 0  # running average cost for this epoch
    total_batch = int(len(data.train_data) / 64)
    data.num = 0
    gaussianNoise = 0.01 * np.random.normal(size=[64, 12288]).reshape(
        [64, 32, 128, 3])  # Gaussian noise with std 0.01, drawn once per epoch
    for i in range(total_batch):
        traindata = data.batch_size([64, 32, 128, 3])  # read a batch of shape [64, 32, 128, 3]

        _, cost = sess.run(ae1.partial_fit(),
                           feed_dict={
                               ae1.x: traindata + gaussianNoise,  # noisy input
                               ae1.y: traindata  # clean reconstruction target
                           })
        avg_cost += cost / len(data.train_data) * 64

    print("Epoch:{},Cost:{:.9f}".format(epoch, avg_cost))
avg_cost = 0
total_batch = int(len(data.test_data) / 64)
data.num = 0
# Test AutoEncoder1
for i in range(total_batch):
    #print(data.test([-1,32,128,3]))
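    # The original listing is cut off here. A hypothetical continuation,
    # assuming data.test(shape) returns a test batch (as the commented-out
    # print above suggests) and that ae1 exposes a cost-only method next to
    # partial_fit(); the calc_total_cost name is a guess patterned on the
    # classic TensorFlow autoencoder example.
    testdata = data.test([64, 32, 128, 3])
    cost = sess.run(ae1.calc_total_cost(),  # evaluates cost without the train op
                    feed_dict={ae1.x: testdata, ae1.y: testdata})
    avg_cost += cost / len(data.test_data) * 64

print("Test cost:{:.9f}".format(avg_cost))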