Example #1
    def test_batch_norm_storage(self):
        x_train, x_test, y_train, y_test = simple_classification()

        batch_norm = layers.BatchNorm()
        gdnet = algorithms.MinibatchGradientDescent(
            [
                layers.Input(10),
                layers.Relu(5),
                batch_norm,
                layers.Sigmoid(1),
            ],
            batch_size=10,
        )
        gdnet.train(x_train, y_train)

        error_before_save = gdnet.prediction_error(x_test, y_test)
        mean_before_save = batch_norm.running_mean.get_value()
        inv_std_before_save = batch_norm.running_inv_std.get_value()

        with tempfile.NamedTemporaryFile() as temp:
            storage.save(gdnet, temp.name)
            storage.load(gdnet, temp.name)

            error_after_load = gdnet.prediction_error(x_test, y_test)
            mean_after_load = batch_norm.running_mean.get_value()
            inv_std_after_load = batch_norm.running_inv_std.get_value()

            self.assertAlmostEqual(error_before_save, error_after_load)
            np.testing.assert_array_almost_equal(mean_before_save,
                                                 mean_after_load)
            np.testing.assert_array_almost_equal(inv_std_before_save,
                                                 inv_std_after_load)
Example #2
    def test_batch_norm_storage(self):
        x_train, x_test, y_train, y_test = simple_classification()

        batch_norm = layers.BatchNorm()
        gdnet = algorithms.GradientDescent(
            [
                layers.Input(10),
                layers.Relu(5),
                batch_norm,
                layers.Sigmoid(1),
            ],
            batch_size=10,
            verbose=True,  # keep it as `True`
        )
        gdnet.train(x_train, y_train, epochs=5)

        error_before_save = gdnet.prediction_error(x_test, y_test)
        mean_before_save = self.eval(batch_norm.running_mean)
        inv_std_before_save = self.eval(batch_norm.running_inv_std)

        with tempfile.NamedTemporaryFile() as temp:
            storage.save(gdnet, temp.name)
            storage.load(gdnet, temp.name)

            error_after_load = gdnet.prediction_error(x_test, y_test)
            mean_after_load = self.eval(batch_norm.running_mean)
            inv_std_after_load = self.eval(batch_norm.running_inv_std)

            self.assertAlmostEqual(error_before_save, error_after_load)
            np.testing.assert_array_almost_equal(mean_before_save,
                                                 mean_after_load)

            np.testing.assert_array_almost_equal(inv_std_before_save,
                                                 inv_std_after_load)
Example #4
    def setup(self):
        # Set up your game here
        self.car_list = arcade.SpriteList()
        self.cone_list = arcade.SpriteList()
        self.barreira_list = arcade.SpriteList()

        self.car = arcade.Sprite("car.png", SPRITE_SCALING_CAR)
        self.car.center_x = 80  # Starting position
        self.car.center_y = 300
        self.car_list.append(self.car)
        #self.physics_engine = arcade.PhysicsEngineSimple(self.car,self.barreira_list)

        fundo = arcade.Sprite("lateral.png", 1.0)
        fundo.center_x = 492
        fundo.center_y = esqYpos
        self.barreira_list.append(fundo)

        fundo = arcade.Sprite("lateral.png", 1.0)
        fundo.center_x = 492
        fundo.center_y = dirYpos
        self.barreira_list.append(fundo)

        fundo = arcade.Sprite("fundo.png", 1.0)
        fundo.center_x = 20
        fundo.center_y = 300
        self.barreira_list.append(fundo)

        self.sensorE = arcade.Sprite("ponto.png", SPRITE_SCALING_DOT)
        self.sensorE.center_x = 274
        self.sensorE.center_y = 315

        self.sensorD = arcade.Sprite("ponto.png", SPRITE_SCALING_DOT)
        self.sensorD.center_x = 274
        self.sensorD.center_y = 285
        '''
        self.check = arcade.Sprite("cone.png",0.03) 
        self.check.center_x = 600
        self.check.center_y = 370
        '''

        #fundo = arcade.Sprite("fundo.png", 1.0)
        #fundo.center_x = 800
        #fundo.center_y = 300
        #self.barreira_list.append(fundo)

        # Import a previously saved network
        for filename in os.listdir('Saves/Rede1_4-3/'):
            if filename.startswith('SavedDriver_+' + CODINOME + '_'):
                # Raw string avoids an invalid escape sequence warning
                dados = re.findall(r'\d+', filename)
                self.resetCount = int(dados[0])
                print(dados[1])
                self.maxDistance = float(dados[1])
                print(dados[2])
                save = "Saves/Rede1_4-3/" + filename
                storage.load(nn, filepath=save)
Example #5
    def test_storage_load_invalid_source(self):
        connection = layers.join(
            layers.Input(10),
            layers.Sigmoid(5),
            layers.Sigmoid(2),
        )

        with self.assertRaisesRegexp(TypeError, "Source type is unknown"):
            storage.load(connection, object)
Example #6
def on_epoch_end(gdnet):
    epoch = gdnet.last_epoch
    errors = gdnet.validation_errors

    if errors.previous() and errors.last() > errors.previous():
        # Load parameters and stop training
        storage.load(gdnet, 'training-epoch-{}.pickle'.format(epoch - 1))
        raise StopTraining("Training has been interrupted")
    else:
        # Save parameters after successful epoch
        storage.save(gdnet, 'training-epoch-{}.pickle'.format(epoch))
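A minimal sketch of how such a callback could be wired into training, assuming the older NeuPy API where the epoch_end_signal argument accepts the callback (newer releases use signals instead); the network layout and data names below are placeholders:

# Sketch, not part of the original example
gdnet = algorithms.GradientDescent(
    [
        layers.Input(10),
        layers.Sigmoid(5),
        layers.Sigmoid(1),
    ],
    epoch_end_signal=on_epoch_end,  # called after every training epoch
    verbose=True,
)
# Validation data is required so that gdnet.validation_errors gets filled
gdnet.train(x_train, y_train, x_test, y_test, epochs=100)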
Example #7
    def test_storage_load_unknown_parameter(self):
        connection = layers.join(
            layers.Input(10),
            layers.Relu(1),
        )

        with self.assertRaisesRegexp(ValueError, "Cannot load parameters"):
            storage.load(connection, {}, ignore_missed=False)

        # Nothing happens if we choose to ignore it
        storage.load(connection, {}, ignore_missed=True)
Example #8
        def on_epoch_end(network):
            epoch = network.last_epoch
            errors[epoch] = network.prediction_error(x_test, y_test)

            if epoch == 4:
                storage.load(
                    network.connection,
                    os.path.join(tempdir, 'training-epoch-2'))
                raise StopTraining('Stop training process after 4th epoch')
            else:
                storage.save(
                    network.connection,
                    os.path.join(tempdir, 'training-epoch-{}'.format(epoch)))
Example #9
    def test_storage_load_from_dict(self):
        relu = layers.Relu(2, name='relu')
        connection = layers.Input(10) > relu

        weight = np.ones((10, 2))
        bias = np.ones((2,))

        storage.load(connection, {
            'relu': {
                'weight': weight,
                'bias': bias,
            }
        })

        np.testing.assert_array_almost_equal(weight, relu.weight.get_value())
        np.testing.assert_array_almost_equal(bias, relu.bias.get_value())
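Since parameters in the dict are matched by layer name, the same dict can initialize any other network that reuses that name; a short sketch (not part of the original test):

# Sketch: transfer the same parameters into a second network by name
other_relu = layers.Relu(2, name='relu')
other_connection = layers.Input(10) > other_relu
storage.load(other_connection, {
    'relu': {
        'weight': weight,
        'bias': bias,
    }
})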
Example #10
    def test_storage_save_load_save(self):
        connection = layers.join(
            layers.Input(10),
            layers.Sigmoid(5),
            layers.Sigmoid(2),
        )

        with tempfile.NamedTemporaryFile() as temp:
            storage.save(connection, temp.name)
            temp.file.seek(0)

            filesize_first = os.path.getsize(temp.name)

            storage.load(connection, temp.name)

        with tempfile.NamedTemporaryFile() as temp:
            storage.save(connection, temp.name)
            temp.file.seek(0)

            filesize_second = os.path.getsize(temp.name)

        self.assertEqual(filesize_first, filesize_second)
Example #11
RESNET50_WEIGHTS_FILE = os.path.join(FILES_DIR, 'resnet50.hdf5')
DOG_IMAGE_PATH = os.path.join(CURRENT_DIR, 'images', 'german-shepherd.jpg')


def download_resnet50_weights():
    if not os.path.exists(RESNET50_WEIGHTS_FILE):
        download_file(
            url=
            "http://neupy.s3.amazonaws.com/tensorflow/imagenet-models/resnet50.hdf5",
            filepath=RESNET50_WEIGHTS_FILE,
            description='Downloading weights')

    print("File with ResNet-50 weights: {}".format(RESNET50_WEIGHTS_FILE))
    return RESNET50_WEIGHTS_FILE


if __name__ == '__main__':
    resnet50_weights_filename = download_resnet50_weights()
    resnet50 = architectures.resnet50()

    print("Recovering ResNet-50 parameters...")
    storage.load(resnet50, resnet50_weights_filename)

    print("Making prediction...")
    dog_image = load_image(DOG_IMAGE_PATH,
                           image_size=(256, 256),
                           crop_size=(224, 224))

    output = resnet50.predict(dog_image)
    print_top_n(output, n=5)
Example #12
    layers.MaxPooling((2, 2)),
    layers.Convolution((512, 3, 3), padding=1, name="conv5_1") > layers.Relu(),
    layers.Convolution((512, 3, 3), padding=1, name="conv5_2") > layers.Relu(),
    layers.Convolution((512, 3, 3), padding=1, name="conv5_3") > layers.Relu(),
    layers.Convolution((512, 3, 3), padding=1, name="conv5_4") > layers.Relu(),
    layers.MaxPooling((2, 2)),
    layers.Reshape(),
    layers.Relu(4096, name="dense_1") > layers.Dropout(0.5),
    layers.Relu(4096, name="dense_2") > layers.Dropout(0.5),
    layers.Softmax(1000, name="dense_3"),
)

if not os.path.exists(VGG19_WEIGHTS_FILE):
    download_file(
        url=("http://srv70.putdrive.com/putstorage/DownloadFileHash/" "F9A70DEA3A5A4A5QQWE2301487EWQS/vgg19.pickle"),
        filepath=VGG19_WEIGHTS_FILE,
        description="Downloading weights",
    )

storage.load(vgg19, VGG19_WEIGHTS_FILE)

dog_image = load_image(os.path.join(CURRENT_DIR, "images", "dog.jpg"), image_size=(256, 256), crop_size=(224, 224))

# Disables dropout layer
with vgg19.disable_training_state():
    x = T.tensor4()
    predict = theano.function([x], vgg19.output(x))

output = predict(dog_image)
print_top_n(output[0], n=5)
Example #13
## **3 - Feed the feature vector to an Artificial Neural Network such as a SOM (Self-Organizing Map) to reduce its dimensionality, then perform clustering**.
Used [Neupy Tutorials](https://github.com/itdxer/neupy/blob/master/notebooks/Looking%20inside%20of%20the%20VGG19%20using%20SOFM.ipynb) for reference
"""

!pip install neupy

"""## Initializing VGG19 Architecture"""

from neupy import architectures, storage
vgg19 = architectures.vgg19()
vgg19

"""## Loading pre-trained parameters from ImageNet"""

storage.load(vgg19, "/content/drive/My Drive/cs6140 project/vgg/vgg19.hdf5")

"""## Propagating images through network"""

dense_2 = vgg19.end('dense_2')
batch_size = 16
outputs = []
#batch_x = np.reshape(batc, [-1, 28, 28, 1])
print(images.shape)
for batch in tqdm(range(0, len(images), batch_size)):
    output = dense_2.predict(images[batch:batch + batch_size])
    outputs.append(output)
    
dense_2_output = np.concatenate(outputs, axis=0)
dense_2_output.shape
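The SOM step described above is not shown in the snippet; a minimal sketch using NeuPy's SOFM, where the grid size, step, radius, and epoch count are illustrative assumptions rather than values from the notebook:

from neupy import algorithms

# Sketch: cluster the 4096-dimensional dense_2 features on a 2D map
sofm = algorithms.SOFM(
    n_inputs=dense_2_output.shape[1],  # 4096 features per image
    features_grid=(20, 20),            # 400 neurons arranged on a 2D grid
    step=0.5,
    learning_radius=5,
    verbose=True,
)
sofm.train(dense_2_output, epochs=30)

# Each image is assigned the index of its best-matching unit on the grid
clusters = sofm.predict(dense_2_output).argmax(axis=1)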
Example #14
import os

import theano
from neupy import layers, storage, architectures

from imagenet_tools import (CURRENT_DIR, FILES_DIR, load_image, print_top_n,
                            download_file)

theano.config.floatX = 'float32'
ALEXNET_WEIGHTS_FILE = os.path.join(FILES_DIR, 'alexnet.pickle')

alexnet = architectures.alexnet()

if not os.path.exists(ALEXNET_WEIGHTS_FILE):
    download_file(
        url="http://neupy.s3.amazonaws.com/imagenet-models/alexnet.pickle",
        filepath=ALEXNET_WEIGHTS_FILE,
        description='Downloading weights')

storage.load(alexnet, ALEXNET_WEIGHTS_FILE)

dog_image = load_image(os.path.join(CURRENT_DIR, 'images', 'dog.jpg'),
                       image_size=(256, 256),
                       crop_size=(227, 227),
                       use_bgr=False)

predict = alexnet.compile()
output = predict(dog_image)
print_top_n(output, n=5)
Example #15
import os

from neupy import storage, architectures

from imagenet_tools import (CURRENT_DIR, FILES_DIR, load_image, print_top_n,
                            download_file)

VGG16_WEIGHTS_FILE = os.path.join(FILES_DIR, 'vgg16.hdf5')
vgg16 = architectures.vgg16()

if not os.path.exists(VGG16_WEIGHTS_FILE):
    download_file(
        url=
        "http://neupy.s3.amazonaws.com/tensorflow/imagenet-models/vgg16.hdf5",
        filepath=VGG16_WEIGHTS_FILE,
        description='Downloading weights')

storage.load(vgg16, VGG16_WEIGHTS_FILE)

dog_image = load_image(os.path.join(CURRENT_DIR, 'images', 'dog.jpg'),
                       image_size=(256, 256),
                       crop_size=(224, 224))

output = vgg16.predict(dog_image)
print_top_n(output, n=5)
Example #16
        [SliceChannels(192, 384), layers.Convolution((128, 3, 3), padding=1, name="conv_5_2"), layers.Relu()],
    ],
    layers.Concatenate(),
    layers.MaxPooling((3, 3), stride=(2, 2)),
    layers.Reshape(),
    layers.Relu(4096, name="dense_1") > layers.Dropout(0.5),
    layers.Relu(4096, name="dense_2") > layers.Dropout(0.5),
    layers.Softmax(1000, name="dense_3"),
)

if not os.path.exists(ALEXNET_WEIGHTS_FILE):
    download_file(
        url=("http://srv70.putdrive.com/putstorage/DownloadFileHash/" "F497B1D43A5A4A5QQWE2295998EWQS/alexnet.pickle"),
        filepath=ALEXNET_WEIGHTS_FILE,
        description="Downloading weights",
    )

storage.load(alexnet, ALEXNET_WEIGHTS_FILE)

dog_image = load_image(
    os.path.join(CURRENT_DIR, "images", "dog.jpg"), image_size=(256, 256), crop_size=(227, 227), use_bgr=False
)

# Disables dropout layer
with alexnet.disable_training_state():
    x = T.tensor4()
    predict = theano.function([x], alexnet.output(x))

output = predict(dog_image)
print_top_n(output[0], n=5)
Example #17
fig.tight_layout()

from tools import download_file, load_image, deprocess

import theano

theano.config.floatX = 'float32'

from network import network

net = network()

import os
from neupy import storage

storage.load(net, WEIGHTS_FILE)

import numpy as np
import matplotlib.pyplot as plt

images = []
image_paths = []
target = []

for path, directories, image_names in os.walk(IMAGE_DIR):
    for image_name in image_names:
        image_path = os.path.join(path, image_name)
        image = load_image(image_path,
                           image_size=(224, 224),
                           crop_size=(224, 224))
Example #18
def create_deeplab_model(resnet50_weights=None,
                         deeplab_weights=None,
                         size=None):
    print("Initializing ResNet-50 architecture...")

    SamePadConv = partial(Convolution, bias=None, padding='same')
    resnet50 = architectures.resnet50(
        input_shape=(size, size, 3),
        include_global_pool=False,
        in_out_ratio=16,
    )

    if resnet50_weights is not None:
        # The pre-trained ResNet-50 contains parameters for the final
        # classification layer. We don't use this layer, which is why
        # we need to set ``ignore_missing=True``
        print("Recovering ResNet-50 parameters...")
        storage.load(resnet50, resnet50_weights, ignore_missing=True)

    in_height, in_width, _ = resnet50.input_shape
    out_height, out_width, _ = resnet50.output_shape

    resnet50_input = resnet50.layers[0]
    deeplab_input = Input(resnet50.output_shape, name='deeplab-input')

    print("Initializing Deeplab architecture...")
    deeplab = join(
        deeplab_input,

        # Atrous Spatial Pyramid Pooling
        parallel(
            SamePadConv((1, 1, 256)) > BatchNorm(),
            SamePadConv((3, 3, 256), dilation=6) > BatchNorm(),
            SamePadConv((3, 3, 256), dilation=12) > BatchNorm(),
            SamePadConv((3, 3, 256), dilation=18) > BatchNorm(), [
                GlobalPooling('avg'),
                Reshape((1, 1, -1)),
                SamePadConv((1, 1, 256)) > BatchNorm(),
                IncludeResidualInputs(deeplab_input),
                ResizeBilinear(),
            ]),
        Concatenate(),
        SamePadConv((1, 1, 256)) > BatchNorm(),

        # Convert to the classification maps
        Convolution((1, 1, 21), padding='same'),
        IncludeResidualInputs(resnet50_input),
        ResizeBilinear((in_height, in_width)),
        Softmax(name='segmentation-proba'),
    )

    if deeplab_weights is not None:
        print("Recovering Deeplab parameters...")
        storage.load(deeplab, deeplab_weights, ignore_missing=True)

    print("Patching layers...")
    patches = {
        BatchNorm: {
            'alpha': 1 - 0.997,
            'epsilon': 1e-5,
        }
    }
    patch_layers(deeplab, patches)
    patch_layers(resnet50, patches)

    return resnet50, deeplab
Example #19

def prepare_image(fname):
    with open(IMAGENET_MEAN_FILE, 'rb') as f:
        # The mean values array is the average image across the whole
        # training dataset. If the dataset has shape (1000, 3, 224, 224),
        # then the mean image has shape (3, 224, 224) and is computed
        # as data.mean(axis=0)
        mean_values = pickle.load(f)

    image = read_image(fname, image_size=(256, 256), crop_size=(224, 224))
    # Convert RGB to BGR
    image[:, (0, 1, 2), :, :] = image[:, (2, 1, 0), :, :]
    return asfloat(image - mean_values)


environment.speedup()
resnet50 = architectures.resnet50()

if not os.path.exists(RESNET50_WEIGHTS_FILE):
    download_file(
        url="http://neupy.s3.amazonaws.com/imagenet-models/resnet50.pickle",
        filepath=RESNET50_WEIGHTS_FILE,
        description='Downloading weights')

storage.load(resnet50, RESNET50_WEIGHTS_FILE)
predict = resnet50.compile()

dog_image = prepare_image(DOG_IMAGE_PATH)
output = predict(dog_image)
print_top_n(output, n=5)
Example #20
    layers.Convolution((1000, 1, 1), padding='valid', name='conv10'),
    layers.GlobalPooling(function=T.mean),
    layers.Reshape(),
    layers.Softmax(),
)

if not os.path.exists(SQUEEZENET_WEIGHTS_FILE):
    download_file(
        url=(
            "http://srv70.putdrive.com/putstorage/DownloadFileHash/"
            "6B0A15B43A5A4A5QQWE2304100EWQS/squeezenet.pickle"
        ),
        filepath=SQUEEZENET_WEIGHTS_FILE,
        description='Downloading weights'
    )

storage.load(squeezenet, SQUEEZENET_WEIGHTS_FILE)

monkey_image = load_image(
    os.path.join(CURRENT_DIR, 'images', 'titi-monkey.jpg'),
    image_size=(256, 256),
    crop_size=(224, 224))

# Disables dropout layer
with squeezenet.disable_training_state():
    x = T.tensor4()
    predict = theano.function([x], squeezenet.output(x))

output = predict(monkey_image)
print_top_n(output[0], n=5)
Example #21
if __name__ == '__main__':
    args = parser.parse_args()
    env = environments[args.imsize]

    print("Loading data...")
    x_test, _, _, _ = load_data(env['test_data_file'])

    print("Initializing VIN...")
    VIN = create_VIN(
        env['input_image_shape'],
        n_hidden_filters=150,
        n_state_filters=10,
        k=env['k'],
    )
    print("Loading pre-trained VIN parameterss...")
    storage.load(VIN, env['pretrained_network_file'])

    plt.figure(figsize=(8, 8))
    gridspec = gridspec.GridSpec(5, 4, height_ratios=[0, 2, 2, 2, 2])
    gridspec.update(wspace=0.1, hspace=0.1)

    plt.suptitle('Trajectories between two points predicted by VIN')

    plt.subplot(gridspec[0, :])
    plt.legend(
        handles=[
            mpatches.Patch(color='#A71C1B', label='Start'),
            mpatches.Patch(color='#F35D47', label='Trajectory'),
            mpatches.Patch(color='#007035', label='Goal'),
        ],
        loc=3,
Example #22
        batch_size='full',
        verbose=False)

    env = gym.make('CartPole-v0')
    env.seed(0)  # To make results reproducible for the gym

    memory_size = 1000  # Number of samples stored in the memory
    memory = deque(maxlen=memory_size)

    if args.use_pretrained:
        if not os.path.exists(CARTPOLE_WEIGHTS):
            raise OSError("Cannot find file with pretrained weights "
                          "(File name: {})".format(CARTPOLE_WEIGHTS))

        print("Loading pretrained weights")
        storage.load(network, CARTPOLE_WEIGHTS)

    else:
        print("Start training")
        train_network(
            env,
            network,
            memory,
            n_games=150,  # Number of games the network is going to play
            max_score=200,  # Maximum score the network can achieve in the game
            epsilon=0.2,  # Probability to select random action during the game
            gamma=0.99,
        )

        if not os.path.exists(FILES_DIR):
            os.mkdir(FILES_DIR)
Example #23
import os

from neupy import storage, architectures

from imagenet_tools import (CURRENT_DIR, FILES_DIR, load_image,
                            print_top_n, download_file)


VGG19_WEIGHTS_FILE = os.path.join(FILES_DIR, 'vgg19.hdf5')
DOG_IMAGE_PATH = os.path.join(CURRENT_DIR, 'images', 'german-shepherd.jpg')
vgg19 = architectures.vgg19()

if not os.path.exists(VGG19_WEIGHTS_FILE):
    download_file(
        url="http://neupy.s3.amazonaws.com/tensorflow/imagenet-models/vgg19.hdf5",
        filepath=VGG19_WEIGHTS_FILE,
        description='Downloading weights')

storage.load(vgg19, VGG19_WEIGHTS_FILE)

dog_image = load_image(
    DOG_IMAGE_PATH,
    image_size=(256, 256),
    crop_size=(224, 224))

output = vgg19.predict(dog_image)
print_top_n(output, n=5)
Example #24
import os

from neupy import storage, architectures

from imagenet_tools import (CURRENT_DIR, FILES_DIR, load_image, print_top_n,
                            download_file)

SQUEEZENET_WEIGHTS_FILE = os.path.join(FILES_DIR, 'squeezenet.hdf5')

# Network weights are ~4.8 MB
squeezenet = architectures.squeezenet()

if not os.path.exists(SQUEEZENET_WEIGHTS_FILE):
    download_file(
        url=
        "http://neupy.s3.amazonaws.com/tensorflow/imagenet-models/squeezenet.hdf5",
        filepath=SQUEEZENET_WEIGHTS_FILE,
        description='Downloading weights')

storage.load(squeezenet, SQUEEZENET_WEIGHTS_FILE)

monkey_image = load_image(os.path.join(CURRENT_DIR, 'images',
                                       'titi-monkey.jpg'),
                          image_size=(227, 227),
                          crop_size=(227, 227),
                          use_bgr=True)

output = squeezenet.predict(monkey_image)
print_top_n(output, n=5)
Example #25
import numpy as np
from neupy import layers, storage, algorithms
from neupy.exceptions import StopTraining

from agent import SillyWalker, Action, create_net

net = create_net()

storage.load(
    net,
    'nets/net',
)

walker = SillyWalker()

while not walker.done:
    s = np.array([list(walker.state) + [1]])
    prediction = net.predict(s)[0]

    walker.step(Action(*prediction))

    walker._env.render()
Example #26
def on_epoch_end(optimizer):
    print("Last epoch: {}".format(optimizer.last_epoch))
    storage.load(optimizer, filepath='file.hdf5')
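A sketch of how this callback might be attached, assuming the newer NeuPy API where optimizers take callbacks through the signals argument; the network, step value, and training data names are placeholders:

# Sketch, not part of the original example
from neupy import algorithms

optimizer = algorithms.Momentum(network, step=0.1, signals=on_epoch_end)
optimizer.train(x_train, y_train, epochs=20)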
Example #27
    layers.Reshape(),

    layers.Relu(4096, name='dense_1') > layers.Dropout(0.5),
    layers.Relu(4096, name='dense_2') > layers.Dropout(0.5),
    layers.Softmax(1000, name='dense_3'),
)

if not os.path.exists(VGG16_WEIGHTS_FILE):
    download_file(
        url=(
            "http://srv70.putdrive.com/putstorage/DownloadFileHash/"
            "5B7DCBF43A5A4A5QQWE2301430EWQS/vgg16.pickle"
        ),
        filepath=VGG16_WEIGHTS_FILE,
        description='Downloading weights'
    )

storage.load(vgg16, VGG16_WEIGHTS_FILE)

dog_image = load_image(os.path.join(CURRENT_DIR, 'images', 'dog.jpg'),
                       image_size=(256, 256),
                       crop_size=(224, 224))

# Disables dropout layer
with vgg16.disable_training_state():
    x = T.tensor4()
    predict = theano.function([x], vgg16.output(x))

output = predict(dog_image)
print_top_n(output[0], n=5)