Example No. 1
def run_attack(
    net: Network, x: torch.Tensor, y: torch.Tensor, epsilons: List[float],
    batch_id: int
) -> Tuple[pandas.DataFrame, List[torch.Tensor], List[torch.Tensor]]:
    x = x.to(net.device)
    y = y.to(net.device)

    x.requires_grad = True

    pred = net(x.view(-1, 28 * 28))

    loss = F.nll_loss(pred, y)
    net.zero_grad()
    loss.backward()
    data_grad = x.grad.data

    tmp_dict = {"id": [], "epsilon": [], "y": [], "y_": []}
    pertubed_images = []
    pertubation = []
    for epsilon in epsilons:
        pertubed_image, pert = fgsm_attack(x, epsilon, data_grad)
        pertubed_images.append(pertubed_image)
        pertubation.append(pert)

        pred = net(pertubed_image.view(-1, 28 * 28))

        y_ = pred.data.max(1).indices.item()

        tmp_dict["id"].append(batch_id)
        tmp_dict["epsilon"].append(epsilon)
        tmp_dict["y"].append(y.item())
        tmp_dict["y_"].append(y_)

    return pandas.DataFrame.from_dict(tmp_dict), pertubed_images, pertubation
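The fgsm_attack helper called above is not reproduced in this example. A minimal sketch under the standard FGSM formulation (perturb by epsilon times the sign of the input gradient, then clamp to the valid pixel range) could look like the following; the project's actual helper may differ:

import torch

def fgsm_attack(x: torch.Tensor, epsilon: float, data_grad: torch.Tensor):
    # Perturbation: epsilon times the sign of the loss gradient w.r.t. the input.
    perturbation = epsilon * data_grad.sign()
    # Apply the perturbation and clamp back to the valid pixel range [0, 1].
    perturbed_image = torch.clamp(x + perturbation, 0, 1)
    return perturbed_image, perturbation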
def main():
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = Network([784, 30, 10])
    net.SGD(training_data=training_data,
            epochs=30,
            mini_batch_size=10,
            eta=3.0,
            test_data=test_data)
    def __init__(self, env, trainable=True, learning_rate=0.001, hidden=30):
        nn.Module.__init__(self)
        Network.__init__(self)
        self.env = env
        self.hidden = hidden
        self.actions = env.action_space.n
        self.build_model_()
        if trainable:
            self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)
        if USE_CUDA:
            self.cuda()
Example No. 4
def train(net: Network, optimizer: torch.optim.Adam,
          train_loader: torch.utils.data.DataLoader, epoch: int):
    net.train()
    for batch_idx, (x, y) in enumerate(train_loader):
        optimizer.zero_grad()
        output = net(x.view(-1, 28 * 28).to(net.device))
        loss = F.nll_loss(output, y.to(net.device))
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print(
                f"Train Epoch: {epoch}, Step: {batch_idx*len(x)}/{len(train_loader.dataset)}, Loss: {loss.item()}"
            )
Example No. 5
def training():
    config = Configuration()

    net = Network()
    optimizer = optim.Adam(net.parameters(), lr=0.01)

    train_loader = get_train_loader()
    test_loader = get_test_loader()

    for epoch in range(config.nn_training_epochs):
        train(net, optimizer, train_loader, epoch)
        test(net, test_loader)

    net.save_model()
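get_train_loader and get_test_loader are project helpers that are not shown on this page. A minimal sketch with compatible signatures, assuming the standard torchvision MNIST dataset, might be:

import torch
from torchvision import datasets, transforms

def get_train_loader(batch_size=64, shuffle=True):
    # Assumes MNIST with plain tensor conversion; the real helper may normalize differently.
    dataset = datasets.MNIST("data", train=True, download=True, transform=transforms.ToTensor())
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)

def get_test_loader(batch_size=1000, shuffle=False):
    dataset = datasets.MNIST("data", train=False, download=True, transform=transforms.ToTensor())
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)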
Example No. 6
    def __init__(self):
        '''

        '''
        self.BATCH_SIZE = 32
        self.EPOCHS = 100000
        # self.image_dim = [-1, 32, 32, 3]                          # CIFAR-10
        self.image_dim = [-1, 28, 28, 1]  # MNIST
        # self.discriminator_input_dim = (None, 32, 32, 3)          # CIFAR-10
        self.discriminator_input_dim = (None, 784)  # MNIST
        self.net = Network()
        # self.data = utils.get_cifar10()                           # CIFAR-10
        self.data = utils.get_mnist()  # MNIST
        # self.logdir = "train_logs/cifar10/"                   # CIFAR-10
        self.logdir = "train_logs/mnist/"  # MNIST
Example No. 7
def test(net: Network, test_loader: torch.utils.data.DataLoader):
    net.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for x, y in test_loader:
            x = x.to(net.device)
            y = y.to(net.device)
            pred = net(x.view(-1, 28 * 28))
            test_loss += F.nll_loss(pred, y, reduction="sum").item()  # size_average=False is deprecated; reduction="sum" is the current equivalent
            correct += torch.eq(pred.max(1).indices, y).sum().item()

        test_loss /= len(test_loader.dataset)
        print(
            f"\nTest set, Average loss: {test_loss}, Accuracy: {float(correct) / float(len(test_loader.dataset))}\n"
        )
def main():
    config = Configuration()

    image_id = 1362

    bnn = BNNWrapper()
    bnn.load_model()

    loss_fn = pyro.infer.Trace_ELBO(
        num_particles=config.bnn_adversary_samples
    ).differentiable_loss

    nn = Network()
    nn.load_model()

    test_loader = get_test_loader(1, shuffle=False)
    x, y = test_loader.dataset[image_id]

    y = torch.tensor([y])

    bnn_d, bnn_imgs, bnn_pertubation_imgs = bnn_adversary.run_attack(
        bnn, loss_fn, x, y, config.epsilons, image_id
    )

    nn_d, nn_imgs, nn_pertubation_imgs = nn_adversary.run_attack(
        nn, x, y, config.epsilons, 3
    )

    ids = [2, 5]

    utils.img_show(
        x,
        f"Image, BNN Prediction: {bnn_d.iloc[0]['y_']}, NN Prediction: {nn_d.iloc[0]['y_']}",
    )
    for id in ids:
        utils.img_two_show(
            bnn_pertubation_imgs[id].cpu(),
            f"BNN Noise (epsilon: {bnn_d.iloc[id]['epsilon']})",
            nn_pertubation_imgs[id].cpu(),
            f"NN Noise (epsilon: {bnn_d.iloc[id]['epsilon']})",
        )
        utils.img_two_show(
            bnn_imgs[id].cpu(),
            f"Noise added to image, BNN Prediction: {bnn_d.iloc[id]['y_']}",
            nn_imgs[id].cpu(),
            f"Noise added to image, NN Prediction: {nn_d.iloc[id]['y_']}",
        )
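utils.img_show and utils.img_two_show are project helpers not reproduced here. A matplotlib-based sketch with the same call signatures, assuming single-channel image tensors, could be:

import matplotlib.pyplot as plt

def _to_array(img):
    # Assumes a torch tensor shaped (1, 28, 28) or (28, 28).
    return img.detach().cpu().squeeze().numpy()

def img_show(img, title=""):
    plt.imshow(_to_array(img), cmap="gray")
    plt.title(title)
    plt.axis("off")
    plt.show()

def img_two_show(img_a, title_a, img_b, title_b):
    _, (ax_a, ax_b) = plt.subplots(1, 2)
    for ax, img, title in ((ax_a, img_a, title_a), (ax_b, img_b, title_b)):
        ax.imshow(_to_array(img), cmap="gray")
        ax.set_title(title)
        ax.axis("off")
    plt.show()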
Example No. 9
def main():
    D_train = DatasetCIFAR10('Train', 'datasets/cifar-10-batches-py/')
    (D_train, D_val) = split(D_train)

    network = Network(mini_inception_architecture,
                      input_shape=(32, 32, 3),
                      depth=32,
                      stride=1,
                      n_hidden=64,
                      n_classes=10)

    train(network, D_train, D_val)
Example No. 10
    def __init__(self, sess, batch_size, num_episodes, actor_target,
                 actor_trainer, critic_target, critic_trainer,
                 trading_state_model, replay_buffer, datacontainer, gamma,
                 tau):
        self.sess = sess
        self.batch_size = batch_size
        self.num_episodes = num_episodes
        self.actor_target = actor_target
        self.actor_trainer = actor_trainer
        self.critic_target = critic_target
        self.critic_trainer = critic_trainer
        self.tsm = trading_state_model
        self.rpb = replay_buffer
        self.datacontainer = datacontainer
        self.gamma = gamma
        self.tau = tau

        self.sess.run(tf.global_variables_initializer())
        self.sess.run(
            Network.assign_target_graph("actor-trainer", "actor-target"))
        self.sess.run(
            Network.assign_target_graph("critic-trainer", "critic-target"))
def main():
    config = Configuration()

    image_id = 1362

    nn = Network()
    nn.load_model()

    test_loader = get_test_loader(1, shuffle=False)
    x, y = test_loader.dataset[image_id]

    y = torch.tensor([y])

    nn_d, nn_imgs, nn_pertubation_imgs = nn_adversary.run_attack(
        nn, x, y, config.epsilons, 3)

    id = 2

    utils.img_show(x, f"Image, NN Prediction: {nn_d.iloc[0]['y_']}")
    utils.img_show(nn_pertubation_imgs[id].cpu(),
                   f"Noise (epsilon: {nn_d.iloc[id]['epsilon']})")
    utils.img_show(nn_imgs[id].cpu(),
                   f"Noise added to image, Prediction: {nn_d.iloc[id]['y_']}")
Example No. 12
    def __init__(self, sess, batch_size, num_episodes, episode_length,
                 actor_target, actor_trainer, critic_target, critic_trainer,
                 env, replay_buffer, gamma, tau, actor_noise):
        self.sess = sess
        self.batch_size = batch_size
        self.num_episodes = num_episodes
        self.episode_length = episode_length
        self.actor_target = actor_target
        self.actor_trainer = actor_trainer
        self.critic_target = critic_target
        self.critic_trainer = critic_trainer
        self.env = env
        self.rpb = replay_buffer
        self.gamma = gamma
        self.tau = tau
        self.actor_noise = actor_noise

        self.sess.run(tf.global_variables_initializer())
        self.sess.run(
            Network.assign_target_graph("actor-trainer", "actor-target"))
        self.sess.run(
            Network.assign_target_graph("critic-trainer", "critic-target"))
        self.writer = tf.summary.FileWriter("./tensorboard", sess.graph)
        self.build_summaries()
Example No. 13
    def __init__(self, *args, shared_network=None,
                 value_network_path=None, policy_network_path=None, **kwargs):
        super().__init__(*args, **kwargs)
        if shared_network is None:
            self.shared_network = Network.get_shared_network(
                net=self.net, num_steps=self.num_steps,
                input_dim=self.num_features)
        else:
            self.shared_network = shared_network
        self.value_network_path = value_network_path
        self.policy_network_path = policy_network_path
        if self.value_network is None:
            self.init_value_network(shared_network=shared_network)
        if self.policy_network is None:
            self.init_policy_network(shared_network=shared_network)
def main():
    D_train = DatasetCIFAR10('Train', 'datasets/cifar-10-batches-py/')

    enlarge_plot_area()
    display = Displayer(
        load_labels('datasets/cifar-10-batches-py/batches.meta'))
    display(D_train)

    (D_train, D_val) = split(D_train)

    network = Network(mini_inception_architecture,
                      input_shape=(32, 32, 3),
                      depth=32,
                      stride=1,
                      n_hidden=64,
                      n_classes=10)

    train(network, D_train, D_val, report=plot_report)
Example No. 15
    def __init__(self,
                 *args,
                 list_stock_code=None,
                 list_chart_data=None,
                 list_training_data=None,
                 list_min_trading_unit=None,
                 list_max_trading_unit=None,
                 value_network_path=None,
                 policy_network_path=None,
                 **kwargs):
        assert len(list_training_data) > 0
        super().__init__(*args, **kwargs)
        self.num_features += list_training_data[0].shape[1]

        # Create the shared network
        self.shared_network = Network.get_shared_network(
            net=self.net,
            num_steps=self.num_steps,
            input_dim=self.num_features)
        self.value_network_path = value_network_path
        self.policy_network_path = policy_network_path
        if self.value_network is None:
            self.init_value_network(shared_network=self.shared_network)
        if self.policy_network is None:
            self.init_policy_network(shared_network=self.shared_network)

        # Create the A2CLearner instances
        self.learners = []
        for (stock_code, chart_data, training_data, min_trading_unit,
             max_trading_unit) in zip(list_stock_code, list_chart_data,
                                      list_training_data,
                                      list_min_trading_unit,
                                      list_max_trading_unit):
            learner = A2CLearner(*args,
                                 stock_code=stock_code,
                                 chart_data=chart_data,
                                 training_data=training_data,
                                 min_trading_unit=min_trading_unit,
                                 max_trading_unit=max_trading_unit,
                                 shared_network=self.shared_network,
                                 value_network=self.value_network,
                                 policy_network=self.policy_network,
                                 **kwargs)
            self.learners.append(learner)
def main(_):
    # load bottleneck data
    (D_train, D_val) = load_bottleneck_data(FLAGS.training_file, FLAGS.validation_file, FLAGS.breadth)

    print(D_train.X.shape, D_train.y.shape)
    print(D_val.X.shape, D_val.y.shape)

    # TODO: define your model and hyperparams here
    # make sure to adjust the number of classes based on
    # the dataset
    # 10 for cifar10
    # 43 for traffic
    network = Network(
        bottleneck_architecture,
        input_shape=D_train.X.shape[1:],
        n_hidden=64,
        n_classes=FLAGS.breadth
    )

    # TODO: train your model here
    train(network, D_train, D_val, learning_rate=0.01, epochs=100)
class NetworkTraining:
    def __init__(self, train_config, image_config):
        self.train_config = train_config
        self.image_config = image_config
        self.network = Network(train_config)

    def load_to_arrays(self, iteration=0, images_num_to_process=1):
        pure_input_train = self.image_config.image_to_array(
            iteration, images_num_to_process,
            self.train_config.get_image_to_array_train_input("pure"))
        noisy_input_train = self.image_config.image_to_array(
            iteration, images_num_to_process,
            self.train_config.get_image_to_array_train_input("noisy"))
        return pure_input_train, noisy_input_train

    def train(self):
        # Get the file paths
        kb.clear_session()
        gpus = tf.config.experimental.list_physical_devices('GPU')
        print("Num GPUs Available: ",
              len(tf.config.experimental.list_physical_devices('GPU')))
        if gpus:
            try:
                # Currently, memory growth needs to be the same across GPUs
                for gpu in gpus:
                    tf.config.experimental.set_memory_growth(gpu, True)
                logical_gpus = tf.config.experimental.list_logical_devices(
                    'GPU')
                print(len(gpus), "Physical GPUs,", len(logical_gpus),
                      "Logical GPUs")
            except RuntimeError as e:
                # Memory growth must be set before GPUs have been initialized
                print(e)

        print('Starting a training process ..')
        print('Preparing training data for CNN ..')
        # save output to logs
        old_stdout = sys.stdout
        timestr = time.strftime("%Y%m%d-%H%M%S")
        model_name = 'DEPTH_' + timestr + '.model'
        name = self.train_config.logs_path + r'/loss_output_' + model_name + '.log'
        log_file = open(name, "w")
        sys.stdout = log_file
        print('Loss function output of model :', model_name, '..')

        # Create a basic model instance
        model = self.network.get()
        compiled_model = model.compile()

        save_model_name = self.train_config.models_path + '/' + model_name
        images_num_to_process = 1000
        all_cropped_num = len(
            os.listdir(self.train_config.cropped_train_images_pure))
        iterations = all_cropped_num // images_num_to_process
        if all_cropped_num % images_num_to_process > 0:
            iterations += 1
        for i in range(iterations):
            print('*************** Iteration : ', i, '****************')
            first_image = i * images_num_to_process
            if i == iterations - 1:
                images_num_to_process = all_cropped_num - i * images_num_to_process
            if self.train_config.LOAD_TRAINED_MODEL:
                # create a dir where we want to copy and rename
                save_model_name = self.train_config.load_model_name + '_new'
                if not os.path.isdir(save_model_name):
                    shutil.copytree(self.train_config.load_model_name,
                                    save_model_name)
                compiled_model = keras.models.load_model(
                    save_model_name)  # used to continue training old models

            pure_input_train, noisy_input_train = self.load_to_arrays(
                first_image, images_num_to_process)
            if self.train_config.OUTPUT_EQUALS_INPUT:
                pure_input_train = noisy_input_train

            model.train(compiled_model, noisy_input_train, pure_input_train,
                        self.train_config.models_path)

            # save the model
            compiled_model.save(
                save_model_name)  # check if using same name is ok
            compiled_model = keras.models.load_model(save_model_name)
            #model = keras.models.load_model(save_model_name, custom_objects={'keras_custom_loss_function':keras_custom_loss_function})

        sys.stdout = old_stdout
        log_file.close()
Example No. 18
        pred = net(pertubed_image.view(-1, 28 * 28))

        y_ = pred.data.max(1).indices.item()

        tmp_dict["id"].append(batch_id)
        tmp_dict["epsilon"].append(epsilon)
        tmp_dict["y"].append(y.item())
        tmp_dict["y_"].append(y_)

    return pandas.DataFrame.from_dict(tmp_dict), pertubed_images, pertubation


if __name__ == "__main__":
    config = Configuration()

    net = Network()
    net.load_model()

    net.eval()

    test_loader = get_test_loader(batch_size=1, shuffle=False)

    result = []
    for batch_id, (x, y) in enumerate(test_loader):
        df, _, _ = run_attack(net, x, y, config.epsilons, batch_id)  # run_attack returns (DataFrame, perturbed images, perturbations)
        result.append(df)

        if batch_id % 100 == 0:
            print(f"Step {batch_id}/{len(test_loader.dataset)}")

    result_df = pandas.concat(result)  # type: pandas.DataFrame
Example No. 19
import tensorflow as tf
from networks import Network
import numpy as np
import os
import datetime
import input_data
import helper

BATCH_SIZE = 128
NOISE_SIZE = 100
EPOCHS = 500000
img_width, img_height, channel = 32, 32, 1
is_training = True
LOG_FILE = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
helper.log_file(is_training, LOG_FILE)

with tf.device('/device:GPU:0'):
	net = Network()
	noise_vector = tf.placeholder(tf.float32,shape=(BATCH_SIZE, NOISE_SIZE))
	image = tf.placeholder(tf.float32,shape=(BATCH_SIZE, img_width, img_height,channel))

	initializer = tf.truncated_normal_initializer(stddev=0.02)

	generated_image = net.generator(noise_vector = noise_vector, initializer = initializer)
	Dx = net.discriminator(image = image, initializer = initializer)
	Dg = net.discriminator(image = generated_image, initializer = initializer, reuse = True)

	generator_loss = -tf.reduce_mean(tf.log(Dg))
	discriminator_loss = -tf.reduce_mean(tf.add(tf.log(Dx), tf.log(1.-Dg)))

	tvars = tf.trainable_variables()
	optimizerG = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5)
	optimizerD = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5)
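The listing stops after creating the two Adam optimizers. The usual next step in this TF1 DCGAN pattern, and an assumption about how the original continues, is to split the trainable variables by scope prefix and build one update op per network (the 'discriminator'/'generator' name prefixes are guesses):

# Continuing inside the same `with tf.device('/device:GPU:0'):` block
d_vars = [var for var in tvars if var.name.startswith('discriminator')]
g_vars = [var for var in tvars if var.name.startswith('generator')]

trainerD = optimizerD.minimize(discriminator_loss, var_list=d_vars)
trainerG = optimizerG.minimize(generator_loss, var_list=g_vars)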
    def __init__(self, networks, *args, **kwargs):
        Network.__init__(self)
        self.networks = [
            Network(*args, **kwargs)
            for Network in networks
        ]
Example No. 21
    #x_train_ex_batch, _ = dataset.get_train_minibatch(100)
    #print(x_train_ex_batch.shape)

    num_samples = 30
    batch_size = 20

    #layers
    latent_units = [2]
    #latent_units = [50]
    hidden_units_q = [[200, 200]]
    hidden_units_p = [[200, 200]]

    # Create the model
    x = tf.placeholder(tf.float32, [batch_size, dataset.dim],
                       name='placeholder_x')
    iwae = Network.build_network(x, num_samples, latent_units, hidden_units_q,
                                 hidden_units_p, dataset.train_bias)
    params = iwae.params

    with tf.Session() as sess:

        # for debugging
        #sess = tf.InteractiveSession()
        #sess = tf_debug.LocalCLIDebugWrapperSession(sess)
        #sess.add_tensor_filter("has_nan", debug_has_nan)

        writer = tf.summary.FileWriter('./graphs', sess.graph)

        # Checkpoints
        dataset_name = 'MNIST'
        traintype = 'iwae'
        #path = ckpt_path_name(dataset_name, num_samples, traintype)
Example No. 22
import argparse
from pathlib import Path

import numpy as np
from PIL import Image

parser = argparse.ArgumentParser(
    description='Uses mnist network to classify a png image')
parser.add_argument('--imagePath',
                    default=(str(Path.home())) +
                    '/Downloads/pixil-frame-0.png')
parser.add_argument('--networkPath', default='outputs/network.pkl')

args = parser.parse_args()
imagePath = args.imagePath
networkPath = args.networkPath

# load network
parameters = get_params_from_file(networkPath)
my_network = Network(*parameters, 'classification')

# load image
img = Image.open(imagePath)
img.thumbnail((28, 28), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
img = np.asarray(img)
img_array = img / 255  # rescale to between 0 and 1
img_array = 1 - img_array
img_array = img_array[:, :,
                      0]  # image should be greyscale, pick only one channel

network_input = img_array.reshape(
    [-1, 1])  # reshapes image to 1d numpy array to feed into network
network_output = my_network.activate(network_input)
network_output = np.ravel(network_output)
x = np.arange(10)  # x labels
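The snippet ends right after building the x-axis positions; the plotting step is not shown. A plausible continuation (an assumption, not the project's actual code) charts the network output per digit with matplotlib:

import matplotlib.pyplot as plt

plt.bar(x, network_output)
plt.xticks(x)
plt.xlabel("digit")
plt.ylabel("network output")
plt.title(f"Predicted digit: {np.argmax(network_output)}")
plt.show()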
Example No. 23
import time
import json
from pyb import Timer
import os

from adc import Adc
from lcd import Lcd
from scan import Scan
from outctl import Outctl
from networks import Network

adc = Adc()
lcd = Lcd()
scan = Scan()
outctl = Outctl()
network = Network()

up_state_cnt = 0  # host-PC status counter
file_flag = 0  # whether to write to a file
adc_freq = 1000  # ADC sampling frequency
voltage_b = 0
current_b = 0
warnvalue = 10
warntime = 20
stopvalue = 20
stoptime = 20
time_now = ''


def up_state(tt):
    global up_state_cnt, file_flag
Example No. 24
    n.add_person_to_picture(per1, pic4)
    n.add_person_to_picture(per5, pic4)

    return n


#n = dummy_network()

#print("Dummy network:", n)

#Network.save(n, 'test.p')

#n_load = Network.load('test.p')

n_load = Network.dummy(150, 500)

print(n_load.people)

source = list(n_load.people)[0]

#n_load.show()

print('source', source)

for neighbor in n_load.neighbors(source):
    print(neighbor)

print('Now traversing all')

for neighbor in n_load.traverse_all(source):
    print(neighbor)  # the loop body is truncated in the original listing; printing mirrors the loop above

    def __init__(self, train_config, image_config):
        self.train_config = train_config
        self.image_config = image_config
        self.network = Network(train_config)
Example No. 26
def dummy_network():
    n = Network()
    pic1 = Picture('famille.png')
    pic2 = Picture('annif.png')
    pic3 = Picture('fete.png')
    pic4 = Picture('soiree')

    per1 = Person('Jesus')
    per2 = Person('Juda')
    per3 = Person('Moise')
    per4 = Person('Charlie')
    per5 = Person('Antoine')

    n.add_person_to_picture(per1, pic1)
    n.add_person_to_picture(per2, pic1)
    n.add_person_to_picture(per3, pic1)

    n.add_person_to_picture(per3, pic2)
    n.add_person_to_picture(per4, pic2)
    n.add_person_to_picture(per5, pic2)

    n.add_person_to_picture(per1, pic3)
    n.add_person_to_picture(per5, pic3)

    n.add_person_to_picture(per1, pic4)
    n.add_person_to_picture(per5, pic4)

    return n
Example No. 27
class GAN(object):
    def __init__(self):
        '''

        '''
        self.BATCH_SIZE = 32
        self.EPOCHS = 100000
        # self.image_dim = [-1, 32, 32, 3]                          # CIFAR-10
        self.image_dim = [-1, 28, 28, 1]  # MNIST
        # self.discriminator_input_dim = (None, 32, 32, 3)          # CIFAR-10
        self.discriminator_input_dim = (None, 784)  # MNIST
        self.net = Network()
        # self.data = utils.get_cifar10()                           # CIFAR-10
        self.data = utils.get_mnist()  # MNIST
        # self.logdir = "train_logs/cifar10/"                   # CIFAR-10
        self.logdir = "train_logs/mnist/"  # MNIST

    def train_ops(self):
        '''

        :return:
        '''
        self.input_placeholder = tf.placeholder(
            tf.float32, shape=self.discriminator_input_dim, name='input')
        random_z = tf.random_normal([self.BATCH_SIZE, 100],
                                    mean=0.0,
                                    stddev=1.0,
                                    name='random_z')

        self.generator = self.net.mnist_generator(random_z,
                                                  is_training=True,
                                                  name='generator')

        self.real_discriminator = self.net.mnist_discriminator(
            self.input_placeholder, is_training=True, name='discriminator')
        self.fake_discriminator = self.net.mnist_discriminator(
            self.generator, is_training=True, reuse=True, name='discriminator')

        self.real_discriminator_loss = tf.losses.sigmoid_cross_entropy(
            tf.constant(1, shape=[self.BATCH_SIZE]),
            self.real_discriminator,
            scope='real_discriminator_loss')
        self.fake_discriminator_loss = tf.losses.sigmoid_cross_entropy(
            tf.constant(0, shape=[self.BATCH_SIZE]),
            self.fake_discriminator,
            scope='fake_discriminator_loss')
        self.discriminator_loss = self.real_discriminator_loss + self.fake_discriminator_loss

        self.generator_loss = tf.losses.sigmoid_cross_entropy(
            tf.constant(1, shape=[self.BATCH_SIZE]),
            self.fake_discriminator,
            scope='generator_loss')
        ## training variables
        self.discriminator_variables = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
        self.generator_variables = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
        ## update ops
        discriminator_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                                     scope='discriminator')
        generator_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                                 scope='generator')

        global_step = tf.Variable(0, name='global_step', trainable=False)

        with tf.control_dependencies(discriminator_update_ops):
            self.train_discriminator = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5). \
                minimize(self.discriminator_loss, var_list=self.discriminator_variables)
        with tf.control_dependencies(generator_update_ops):
            self.train_generator = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5). \
                minimize(self.generator_loss, var_list=self.generator_variables, global_step=global_step)

    def summary_ops(self):
        '''

        :return:
        '''
        self.summary_discriminator = tf.summary.merge([
            tf.summary.scalar('summary/real_discriminator_loss',
                              self.real_discriminator_loss),
            tf.summary.scalar('summary/fake_discriminator_loss',
                              self.fake_discriminator_loss),
            tf.summary.scalar('summary/discriminator_loss',
                              self.discriminator_loss)
        ])

        input_img = tf.reshape(self.input_placeholder, self.image_dim)
        gen_img = tf.reshape(self.generator, self.image_dim)
        # input_visualisation = tf.cast(((self.input_placeholder / 2.0) + 0.5) * 255.0, tf.uint8)
        # generator_visualisation = tf.cast(((self.generator / 2.0) + 0.5) * 255.0, tf.uint8)
        input_visualisation = tf.cast(((input_img / 2.0) + 0.5) * 255.0,
                                      tf.uint8)
        generator_visualisation = tf.cast(((gen_img / 2.0) + 0.5) * 255.0,
                                          tf.uint8)

        self.summary_input = tf.summary.image('summary/input',
                                              input_visualisation,
                                              max_outputs=3)

        self.summary_generator = tf.summary.merge([
            tf.summary.image('summary/generator',
                             generator_visualisation,
                             max_outputs=3),
            tf.summary.scalar('summary/generator_loss', self.generator_loss)
        ])

    def train(self):
        '''

        :return:
        '''
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            logwriter = tf.summary.FileWriter(self.logdir, sess.graph)
            for i in range(self.EPOCHS):
                inp = self.data[np.random.choice(self.data.shape[0],
                                                 self.BATCH_SIZE)]

                (_, sum_dis) = sess.run(
                    (self.train_discriminator, self.summary_discriminator),
                    feed_dict={self.input_placeholder: inp})

                (_, sum_gen) = sess.run(
                    (self.train_generator, self.summary_generator))

                s = sess.run(self.summary_input,
                             feed_dict={self.input_placeholder: inp})

                if i % 100 == 0:
                    print(i)
                    logwriter.add_summary(sum_dis, i)
                    logwriter.add_summary(sum_gen, i)
                    logwriter.add_summary(s, i)
Example No. 28
def train_without_pruning():
    tf.compat.v1.reset_default_graph()

    # Inference
    network = Network(NUM_CLASSES)
    inputs = tf.compat.v1.placeholder(tf.float32, [None, INPUT_SIZE, INPUT_SIZE, INPUT_CHANNEL], 'inputs')
    logits = network.inference(inputs)

    # loss & accuracy
    labels = tf.compat.v1.placeholder(tf.int64, [None, ], 'labels')
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
    prediction = tf.argmax(tf.nn.softmax(logits), axis=1)
    acc = tf.reduce_mean(tf.cast(tf.equal(prediction, labels), dtype=tf.float32))

    # optimizer
    global_step = tf.compat.v1.train.get_or_create_global_step()
    optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate=LEARNING_RATE, momentum=0.9)
    train_op = optimizer.minimize(loss, global_step)

    # loading data
    train_next = load_tfrecords('train')
    test_next = load_tfrecords('test')

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())

        # summaries
        logs_dir = './logs/without_pruning'
        if not os.path.exists(logs_dir):
            os.makedirs(logs_dir)
        tf.compat.v1.summary.scalar('monitor/loss', loss)
        tf.compat.v1.summary.scalar('monitor/acc', acc)
        merged_summary_op = tf.compat.v1.summary.merge_all()
        train_summary_writer = tf.compat.v1.summary.FileWriter(os.path.join(logs_dir, 'train'), graph=sess.graph)
        test_summary_writer = tf.compat.v1.summary.FileWriter(os.path.join(logs_dir, 'test'), graph=sess.graph)

        best_acc = 0
        saver = tf.compat.v1.train.Saver()
        for epoch in range(NUM_EPOCHS):
            # training
            num_steps = TRAIN_SIZE // BATCH_SIZE
            train_acc = 0
            train_loss = 0
            for step in range(num_steps):
                x, y = sess.run(train_next)
                _, summary, train_acc_batch, train_loss_batch = sess.run([train_op, merged_summary_op, acc, loss],
                                                                         feed_dict={inputs: x, labels: y})
                train_acc += train_acc_batch
                train_loss += train_loss_batch
                sys.stdout.write("\r epoch %d, step %d, training accuracy %g, training loss %g" %
                                 (epoch + 1, step + 1, train_acc_batch, train_loss_batch))
                sys.stdout.flush()
                train_summary_writer.add_summary(summary, global_step=epoch * num_steps + step)
                train_summary_writer.flush()
            print("\n epoch %d, training accuracy %g, training loss %g" %
                  (epoch + 1, train_acc / num_steps, train_loss / num_steps))

            # testing
            num_steps = TEST_SIZE // BATCH_SIZE
            test_acc = 0
            test_loss = 0
            for step in range(num_steps):
                x, y = sess.run(test_next)
                summary, test_acc_batch, test_loss_batch = sess.run([merged_summary_op, acc, loss],
                                                                    feed_dict={inputs: x, labels: y})
                test_acc += test_acc_batch
                test_loss += test_loss_batch
                test_summary_writer.add_summary(summary, global_step=(epoch * num_steps + step) * (TRAIN_SIZE // TEST_SIZE))
                test_summary_writer.flush()
            print(" epoch %d, testing accuracy %g, testing loss %g" %
                  (epoch + 1, test_acc / num_steps, test_loss / num_steps))

            if test_acc / num_steps > best_acc:
                best_acc = test_acc / num_steps
                saver.save(sess, './ckpt_without_pruning/model')

        print(" Best Testing Accuracy %g" % best_acc)
        testloaders.append(DataLoader(BIO_tests[j], shuffle=False, batch_size=BATCH_SIZE))
        j += 1


device = "cuda" if torch.cuda.is_available() else "cpu"  # Configure device
print('GPU USED?', torch.cuda.is_available())

################# MODELS #####################

models = []
optimizers = []
criterions = []
j = 0
for i in range(len_class):
    for event in events:
        model = Network(output_dim=numb_class[i])
        model = model.to(device)
        model.eval()
        models.append(model)
        weights_list[j] = weights_list[j].to(device)
        criterions.append(nn.CrossEntropyLoss(weight=weights_list[j]))
        optimizers.append(optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY))
        del model
        j += 1


#### MODE-SPECIFIC CLASSIFIER SCHEME
# 1 : LW, 2:RA, 3:RD, 4:SA, 5:SD, 6:Stand
# MODEL1: LW(1)-> LW(1), RA(2), RD(3), SA(4), SD(5), Stand(6)
# MODEL2: RA(2)-> LW(1), RA(2)
# MODEL3: RD(3)-> LW(1), RD(3)
Example No. 30
def create_model(args, atomrefs, means, stddevs, properties, avg_n_atoms):
    ssp = rescaled_act.ShiftedSoftplus(beta=args.beta)
    kernel_conv = create_kernel_conv(
        cutoff=args.rad_maxr,
        n_bases=args.rad_nb,
        n_neurons=args.rad_h,
        n_layers=args.rad_L,
        act=ssp,
        radial_model=args.radial_model
    )

    sp = rescaled_act.Softplus(beta=args.beta)
    if args.res:
        net = ResNetwork(
            kernel_conv=kernel_conv,
            embed=args.embed,
            l0=args.l0,
            l1=args.l1,
            l2=args.l2,
            l3=args.l3,
            L=args.L,
            scalar_act=sp,
            gate_act=rescaled_act.sigmoid,
            avg_n_atoms=avg_n_atoms
        )
    else:
        net = Network(
            kernel_conv=kernel_conv,
            embed=args.embed,
            l0=args.l0,
            l1=args.l1,
            l2=args.l2,
            l3=args.l3,
            L=args.L,
            scalar_act=sp,
            gate_act=rescaled_act.sigmoid,
            avg_n_atoms=avg_n_atoms
        )

    ident = torch.nn.Identity()

    if args.mlp_out:
        outnet = OutputMLPNetwork(
            kernel_conv=kernel_conv,
            previous_Rs=net.Rs[-1],
            l0=args.outnet_l0,
            l1=args.outnet_l1,
            l2=args.outnet_l2,
            l3=args.outnet_l3,
            L=args.outnet_L,
            scalar_act=sp,
            gate_act=rescaled_act.sigmoid,
            mlp_h=args.outnet_neurons,
            mlp_L=args.outnet_layers,
            avg_n_atoms=avg_n_atoms
        )
    else:
        outnet = OutputScalarNetwork(
            kernel_conv=kernel_conv,
            previous_Rs=net.Rs[-1],
            scalar_act=ident,
            avg_n_atoms=avg_n_atoms
        )

    output_modules = [
        spk.atomistic.Atomwise(
            property=prop,
            mean=means[prop],
            stddev=stddevs[prop],
            atomref=atomrefs[prop],
            outnet=outnet,
            # aggregation_mode='sum'
        ) for prop in properties
    ]
    model = spk.AtomisticModel(net, output_modules)
    return model