Example #1
def main():
    global args
    args = parser.parse_args()
    np.random.seed(args.r_seed)
    tf.set_random_seed(args.r_seed)
    phrase_feature_dim = 6000
    region_feature_dim = 4096
    if args.spatial: 
        if args.dataset == 'flickr':
            region_feature_dim += 5
        else:
            region_feature_dim += 8
            
    # setup placeholders
    labels_plh = tf.placeholder(tf.float32, shape=[None, args.max_boxes])  # labels: batch_size x max_boxes
    phrase_plh = tf.placeholder(tf.float32, shape=[None,
                                                   phrase_feature_dim]) # batch_size * 6000
    region_plh = tf.placeholder(tf.float32, shape=[None, args.max_boxes,
                                                   region_feature_dim]) # batch_size * max_boxes * 4096
    train_phase_plh = tf.placeholder(tf.bool, name='train_phase')
    num_boxes_plh = tf.placeholder(tf.int32)
    
    plh = {}
    plh['num_boxes'] = num_boxes_plh
    plh['labels'] = labels_plh
    plh['phrase'] = phrase_plh
    plh['region'] = region_plh
    plh['train_phase'] = train_phase_plh
    
    test_loader = DataLoader(args, region_feature_dim, phrase_feature_dim,
                             plh, 'test')
    model = setup_model(args, phrase_plh, region_plh, train_phase_plh,
                        labels_plh, num_boxes_plh, region_feature_dim)
    if args.test:
        test(model, test_loader, model_name=args.resume)
        sys.exit()

    save_model_directory = os.path.join('runs', args.name)
    if not os.path.exists(save_model_directory):
        os.makedirs(save_model_directory)

    train_loader = TorchDataLoader(args, region_feature_dim, phrase_feature_dim,
                                   plh, 'train')
    val_loader = DataLoader(args, region_feature_dim, phrase_feature_dim,
                            plh, 'val')

    # training with Adam
    acc, best_adam = train(plh, model, train_loader, test_loader, args.resume)

    # finetune with SGD after loading the best model trained with Adam
    best_model_filename = os.path.join('runs', args.name, 'model_best')
    acc, best_sgd = train(model, train_loader, val_loader,
                          best_model_filename, False, acc)
    best_epoch = best_adam + best_sgd
    
    # get performance on test set
    test_acc = test(model, test_loader, model_name=best_model_filename)
    print('best model at epoch {}: {:.2f}% (val {:.2f}%)'.format(
        best_epoch, test_acc * 100, acc * 100))
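The DataLoader, setup_model, train, and test helpers above come from project modules not shown in this listing. As a minimal sketch (an assumption, not this project's code), the plh dict of placeholders could be turned into a TF 1.x feed_dict for one batch like this:

def make_feed_dict(plh, phrase_batch, region_batch, label_batch,
                   num_boxes, is_training):
    # Hypothetical helper: maps one batch of numpy arrays onto the
    # placeholders created in main() above.
    return {
        plh['phrase']: phrase_batch,    # [batch, phrase_feature_dim]
        plh['region']: region_batch,    # [batch, max_boxes, region_feature_dim]
        plh['labels']: label_batch,     # [batch, max_boxes]
        plh['num_boxes']: num_boxes,
        plh['train_phase']: is_training,
    }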
Example #2
def load_checkpoint(checkpoint_path):
    checkpoint = torch.load(checkpoint_path)
    arch = checkpoint['arch']
    in_features = get_input_features(arch)
    pretrained_model = get_torchvision_model(arch)
    hidden_units = checkpoint['hidden_units']
    model = setup_model(pretrained_model, in_features, hidden_units)
    model.load_state_dict(checkpoint['state_dict'])
    model.class_to_idx = checkpoint['class_to_idx']

    return model
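load_checkpoint expects a dict with the keys 'arch', 'hidden_units', 'state_dict', and 'class_to_idx'. A minimal sketch of the matching save side, assuming torch.save is used (the function name is hypothetical; the project's own saver may store additional fields):

import torch

def save_checkpoint(model, arch, hidden_units, class_to_idx, save_path):
    # Hypothetical counterpart to load_checkpoint above: it persists exactly
    # the fields that load_checkpoint reads back.
    checkpoint = {
        'arch': arch,
        'hidden_units': hidden_units,
        'state_dict': model.state_dict(),
        'class_to_idx': class_to_idx,
    }
    torch.save(checkpoint, save_path)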
Example #3
import logging

from aiohttp import web

from base import setup_database, setup_email, setup_report, setup_task
from config import setup_config
from model import setup_model
from routes import setup_routes, setup_middleware

logging.basicConfig(
    format='%(levelname)s: %(asctime)s [%(pathname)s:%(lineno)d] %(message)s',
    level=logging.NOTSET)

app = web.Application()
setup_config(app)

setup_routes(app)
setup_middleware(app)
setup_task(app)

setup_database(app)  # depends on config
setup_model(app)  # depends on database, config

setup_email(app)  # depends on config
setup_report(app)  # depends on config, database, email

host, port = app['config']['server']['host'], int(
    app['config']['server']['port'])
web.run_app(app, host=host, port=port)
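setup_config and the other setup_* helpers are project modules that each take the aiohttp Application and attach state to it; the code above reads the result back via app['config']['server']. A minimal sketch of what setup_config might look like, assuming a JSON settings file (the file name and format are assumptions):

import json

def setup_config(app, path='config.json'):
    # Hypothetical sketch: load settings once and store them on the
    # application so later setup_* calls and the host/port lookup above
    # can read app['config'].
    with open(path) as f:
        app['config'] = json.load(f)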
Example #4
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

from model import setup_model
from api_addrbook import api_addrbook

app = Flask(__name__)
app.register_blueprint(api_addrbook, url_prefix='/api/addrbook')

app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///addrbook.sqlite3'
db = SQLAlchemy(app)
app.config['model'] = setup_model(db)
db.create_all()

app.run(debug=True)
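Because the model is stored in app.config, routes inside the api_addrbook blueprint can reach it through Flask's current_app proxy. A minimal sketch of such a route (the endpoint name and the model's list_entries method are assumptions, not this project's API):

from flask import Blueprint, current_app, jsonify

api_addrbook = Blueprint('api_addrbook', __name__)

@api_addrbook.route('/entries')
def list_entries():
    # Hypothetical route: fetch the model object set up above and return
    # its data as JSON.
    model = current_app.config['model']
    return jsonify(model.list_entries())  # list_entries() is a placeholder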
Example #5
import json
import sys
import os

from skimage import io
from skimage import transform

from PIL import Image  # to show image
from model import setup_model

SIZE = (32, 32)
MODEL_FILE = "classifier.tfl"

model = setup_model()

model.load(MODEL_FILE)

filenames = sys.argv[1:]

for filename in filenames:
    filepath = os.path.abspath(filename)
    try:
        im = io.imread(filepath)
        im = transform.resize(im, SIZE)

        # img = Image.fromarray(im, 'RGB')
        # img.show()

    except ValueError:
        # Truncated in the original listing; minimal error handling assumed.
        print("Unable to load: {:s}".format(filepath), file=sys.stderr)
        continue
Example #6
epochs = args.epochs

if not os.path.isdir(data_dir):
    print('{} is not a valid directory'.format(data_dir))
    exit()
if not utils.is_valid_architecture(architecture):
    print('{} is not a valid architecture'.format(architecture))
    exit()
if not torch.cuda.is_available() and args.gpu:
    print('WARNING: no CUDA available for training, will use CPU')

#Load data
trainloader, validloader, testloader = utils.load_data(data_dir)

# Get torchvision architecture
pre_trained_model = utils.get_torchvision_model(architecture)

# Build network
in_features = utils.get_input_features(architecture)
model = setup_model(pre_trained_model, in_features, hidden_units)
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), learning_rate)

# Train the network
train(model, trainloader, validloader, criterion, optimizer, args.gpu, epochs)

# Save the model checkpoint
class_to_idx = utils.get_datasets(data_dir)[0].class_to_idx
save_path = os.path.join(args.save_dir, 'checkpoint.pth')
utils.save_checkpoint(model, optimizer, architecture, hidden_units,
                      learning_rate, epochs, class_to_idx, save_path)
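utils.is_valid_architecture, utils.get_torchvision_model, and utils.get_input_features are not shown in this listing. A rough sketch of what they might do; the supported architectures and classifier input sizes below are assumptions, not the project's actual table:

import torchvision.models as models

# Hypothetical helpers backing the calls above.
_IN_FEATURES = {'vgg16': 25088, 'densenet121': 1024, 'alexnet': 9216}

def is_valid_architecture(architecture):
    return architecture in _IN_FEATURES

def get_torchvision_model(architecture):
    return getattr(models, architecture)(pretrained=True)

def get_input_features(architecture):
    return _IN_FEATURES[architecture]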
Example #7
import json
import sys
import os

from skimage import io
from skimage import transform

from model import setup_model

# TODO: handle argument parsing in a sane manner

SIZE = (32, 32)
MODEL_FILE = "cat-classifier.tfl"

model = setup_model()

model.load(MODEL_FILE)

filenames = sys.argv[1:]

for filename in filenames:
    filepath = os.path.abspath(filename)
    try:
        im = io.imread(filepath)
        im = transform.resize(im, SIZE)
    except ValueError:
        print("Unable to load: {:s}".format(filepath), file=sys.stderr)
        continue  # skip unreadable files; otherwise `im` is undefined below
    result = model.predict([im])
    print(json.dumps({
        "filepath": filepath,
        # The rest of this dict was cut off in the original listing; a
        # prediction field is assumed here.
        "prediction": [float(p) for p in result[0]],
    }))
Example #8
File: run.py Project: AndrewHess/convbot
def talk(args, vocab, rev_vocab):
    ''' Infinitely run the loop of user and bot talking with user feedback. '''

    global itr

    # Setup the models.
    gen, dis, full = load(args) if args.load else setup_model(args)

    # Setup the training data if it is from a file.
    if args.train_file is not None:
        # Make sure at least one model is being trained.
        assert (args.train != 'none')

        train_x, train_y = [], []

        # Read the data from train_file.
        with open(os.path.join(args.data_folder, args.train_file),
                  'r') as infile:
            for line in infile:
                line = line[:-1]  # Remove the newline.
                pos = line.find(':')
                train_x.append(line[:pos])
                train_y.append(line[pos + 1:])

        # Set each item in train_x and train_y to what is used as input.
        for (i, (x, y)) in enumerate(zip(train_x, train_y)):
            # Encode the data into word id numbers.
            x = encode_with_dict(x.split(' '), vocab)
            y = encode_with_dict(y.split(' '), vocab)

            # Get the data into the input format for the models.
            x = format_input(x)
            y = format_input(y)

            train_x[i] = x
            train_y[i] = y

    # Run the main loop.
    while itr < args.epochs:
        itr += 1

        if args.train != 'none':
            print('iteration:', itr)

        if args.train_file is not None:
            # Use new random numbers for the input.
            train_x = [[x[0], np.random.normal(size=(1, num_rand))]
                       for x in train_x]

            # Get the generator predictions.
            pred = [gen.predict(x) for x in train_x]

            # Create the input for the discriminator.
            real_dis_input = np.concatenate([y[0] for y in train_y])
            prompt_input = np.concatenate([x[0] for x in train_x] * 2)
            word_input = np.concatenate((np.concatenate(pred), real_dis_input))
            dis_input = [prompt_input, word_input]

            # Create the input for the generator.
            gen_input = np.concatenate([x[0] for x in train_x])
            gen_input = [
                gen_input,
                np.random.normal(size=(len(train_x), num_rand))
            ]

            # Create the noisy labels.
            gen_labels = 1 - np.random.random_sample(size=(len(train_x),
                                                           1)) / 10
            dis_labels = np.concatenate(
                (1 - np.random.random_sample(size=(len(train_x), 1)) / 10,
                 np.random.random_sample(size=(len(train_x), 1)) / 10))

            # Randomly flip 5 percent of the discriminator's labels to keep the
            # discriminator loss from decreasing to 0 too quickly.
            for _ in range(int(len(train_x) * 0.05)):
                i = np.random.randint(0, len(train_x))
                k = len(train_x) - i - 1
                dis_labels[i][0], dis_labels[k][0] = dis_labels[k][
                    0], dis_labels[i][0]

            # Train and save the models.
            possibly_train_gen(gen, dis, full, gen_input, gen_labels, args)
            possibly_train_dis(gen, dis, full, dis_input, dis_labels, args)
            possibly_save(gen, dis, full, args)
        else:
            print(prompt, end='')

            try:
                gen_input = get_formatted_user_input(vocab)
            except ValueError:
                continue

            response = gen.predict(gen_input)
            bad_gen_out = [np.array(response), np.array([[1]])]

            # Get the most likely word for each position.
            response = np.argmax(response[0], axis=1)

            # Print the response.
            print(prompt, ' '.join(encode_with_dict(response, rev_vocab)))

            # Train the model.
            if args.train != 'none':
                # Get the label response from the user.
                print('Enter a good response:', end=' ')
                try:
                    good_gen_out = get_formatted_user_input(vocab)
                except ValueError:
                    continue

                # Setup the input for training the discriminator.
                dis_input = [
                    np.concatenate((gen_input[0], gen_input[0])),
                    np.concatenate((bad_gen_out[0], good_gen_out[0]))
                ]

                # Use noisy labels to help with training.
                gen_labels = np.random.random_sample(size=(1, 1)) / 10
                dis_labels = np.random.random_sample(size=(2, 1)) / 10
                gen_labels[0][0] = 1 - gen_labels[0][0]
                dis_labels[0][0] = 1 - dis_labels[0][0]

                # Train and save the models.
                possibly_train_gen(gen, dis, full, gen_input, gen_labels, args)
                possibly_train_dis(gen, dis, full, dis_input, dis_labels, args)
                possibly_save(gen, dis, full, args)

    # It will never reach this point.
    return
Example #9
                                               exp_config["batch_size"],
                                               exp_config["nn_joint_names"],
                                               im_trans,
                                               False,
                                               device,
                                               from_demo=90,
                                               to_demo=100)
"""
for i in range(0, 9):
    show_torched_im(train_set[i][0][0])
"""

# Train the model
log_path = "./logs/{}-{}".format(exp_config["experiment_name"], t_stamp())
full_model = setup_model(device, im_params["resize_height"],
                         im_params["resize_width"],
                         exp_config["nn_joint_names"])

lower_bounds = joints_lower_limits(exp_config["nn_joint_names"], robot_model)
upper_bounds = joints_upper_limits(exp_config["nn_joint_names"], robot_model)

print(lower_bounds)
print(upper_bounds)

# constraint = StayInZone(full_model, mbs, maxbs, nn_joint_names, robot_model)
# constraint = MoveSlowly(full_model, 2.0, nn_joint_names, robot_model)
# constraint = SmoothMotion(full_model, 0.5, nn_joint_names, robot_model)

# constraint = MatchOrientation(full_model, target_orientation, exp_config["nn_joint_names"], robot_model)
constraint = None