Example #1
from flask import Flask
from flask_restful import reqparse, abort, Api, Resource
import numpy as np
from prediction import Prediction
from build_model import get_model

app = Flask(__name__)
api = Api(app)

model = get_model()

api.add_resource(Prediction, '/', resource_class_kwargs={'model': model})
if __name__ == '__main__':
    app.run(debug=True)
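
With the service running on the Flask development server, the resource registered at '/' can be queried over HTTP. The call below is only a sketch: the HTTP method and parameter name are assumptions, since the Prediction resource lives in prediction.py and is not shown here.

# Hypothetical client call; adjust the method and parameter names to whatever
# the Prediction resource actually parses.
import requests

response = requests.get("http://127.0.0.1:5000/", params={"features": "5.1,3.5,1.4,0.2"})
print(response.status_code, response.json())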
Example #2
    # Load the preprocessed dataset from .npy files so that loading is fast
    train_X = np.load("../../dataset/fer2013/train_X.npy")
    # print(train_X.shape)
    train_y = np.load("../../dataset/fer2013/train_y.npy")
    # print(train_y.shape)
    validation_X = np.load("../../dataset/fer2013/validation_X.npy")
    validation_y = np.load("../../dataset/fer2013/validation_y.npy")
    
    mean_X = np.load("../../dataset/fer2013/X_mean.npy")
    train_X -= mean_X
    train_X = train_X.reshape(train_X.shape[0], 48, 48, 1)
    train_y = keras.utils.to_categorical(train_y, num_classes = 7)
    
    validation_X -= mean_X
    validation_X = validation_X.reshape(validation_X.shape[0], 48, 48, 1)
    validation_y = keras.utils.to_categorical(validation_y, num_classes = 7)
    
    
    model = build_model.get_model()
   
    epochs = 16
    batch_size = 128
    history = model.fit(train_X, train_y, epochs=epochs, batch_size=batch_size, verbose=2, validation_data = (validation_X, validation_y))
    build_model.plot_training(history, "base")
    
    model.save('../../model/CNN_expression_baseline.h5')
    print("finished")
    
     
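The saved baseline can later be reloaded for inference. The sketch below assumes the same preprocessing as during training (mean subtraction followed by reshaping to 48x48x1); the zero-filled face array is a placeholder for a real flattened grayscale face.

# Inference sketch; `face` is a placeholder for one flattened 48x48 image.
import numpy as np
import keras

model = keras.models.load_model('../../model/CNN_expression_baseline.h5')
mean_X = np.load("../../dataset/fer2013/X_mean.npy")

face = np.zeros((1, 48 * 48))          # replace with a real preprocessed face
face -= mean_X
face = face.reshape(1, 48, 48, 1)
probabilities = model.predict(face)    # one probability per emotion class (7 classes)
print(probabilities.argmax(axis=1))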
Example #3
File: train.py  Project: chan8616/PoAI
def train_model(config, save_dir):

    model_path = save_dir + 'Model/'
    make_savedir(model_path)

    #load dataset
    dataloader, len_dataset, dataset_name = load_dataloader(config, save_dir)
    in_ch = dataloader['train'].dataset[0][0].shape[0]
    classes = dataloader['train'].dataset.classes
    width  = dataloader['train'].dataset[0][0].shape[-1]
    phases = dataloader.keys()
    num_classes = len(classes)

    #load model
    for key, value in vars(config).items():
        if 'model' in key and value is not None:
            model_name = value
            break

    model = get_model(model_name, in_ch, num_classes, config.preTrain)
    model.classes = classes
    model, device, parallel = config_device(config, model)

    criterion = nn.CrossEntropyLoss()
    optimizer = get_optim(config, model)

    if config.save_best:
        best_model_wts = copy.deepcopy(model.state_dict())

    if config.tfboard:
        writer, tfboard_path = get_tfboard_writer(save_dir, model_name, dataset_name)
        images, _ = next(iter(dataloader['train'])) 
        if parallel:
            writer.add_graph(model.module, images.to(device))
        else:
            writer.add_graph(model, images.to(device))

    num_epochs = config.epoch
    best_acc = 0

    for epoch in range(num_epochs):
        epoch_start_time = time.time()
        print('\n\nEpoch {}/{}'.format(epoch+1, num_epochs))
        print('-' * 60)

        for phase in phases:
            if phase == 'train':
                model.train()
            elif phase == 'valid':
                model.eval()
            else:
                break
            running_loss = 0.0
            running_corrects = 0

            for inputs, labels in dataloader[phase]:
                if len(labels) == 1:
                    continue
                inputs = inputs.to(device)
                labels = labels.to(device)

                optimizer.zero_grad()

                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
        
            epoch_loss = running_loss / len_dataset[phase]
            epoch_acc = running_corrects.double() / len_dataset[phase]

            print('<{}>\t\tLoss : {:.4f}\t\tAcc : {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
        
            if config.tfboard:
                writer.add_scalar(phase+'_loss', epoch_loss, global_step=epoch+1)
                writer.add_scalar(phase+'_acc', epoch_acc, global_step=epoch+1)

        if config.tfboard:
            writer.add_figure('valid : predictions vs. actuals',
                plot_classes_preds(model, inputs, classes, labels), global_step=epoch+1)
            if epoch == 0:
                tb = program.TensorBoard()
                tb.configure(argv=[None, '--logdir', tfboard_path])
                url = tb.launch()
                webbrowser.open(url)

        time_elapsed = time.time()-epoch_start_time
        print('elapsed time : {:.0f}m {:.0f}s'.format(
            time_elapsed//60, time_elapsed%60))

        if epoch_acc > best_acc:
            best_acc = epoch_acc
            if config.save_best:
                best_model_wts = copy.deepcopy(model.state_dict())
            else:
                path = model_path+'_'.join([model_name, str(in_ch), str(num_classes),dataset_name, '{:.4f}'.format(best_acc.item()),time_stamp]) + '.pth'
                print('save ', path.split('/')[-1])
                if parallel:
                    # torch.save(model.module.state_dict(), path)
                    torch.save(model.module, path)
                else:
                    # torch.save(model.state_dict(), path)
                    torch.save(model, path)
        print("Best valid Acc: {:4f}".format(best_acc))
        print('-' * 60)

    if config.save_best:
        model.load_state_dict(best_model_wts)
        path = model_path+'_'.join([model_name, str(in_ch), str(num_classes), str(width), dataset_name, '{:.4f}'.format(best_acc.item()),time_stamp]) + '.pth'
        print('save ', path)
        print('save ', path.split('/')[-1])
        if parallel:
            # torch.save(model.module.state_dict(), path)
            torch.save(model.module, path)
        else:
            # torch.save(model.state_dict(), path)
            torch.save(model, path)

    if 'test' in phases:
        test_model(model, dataloader['test'], classes, device)

    if config.tfboard:
        writer.close()
    print('\nComplete training\n')
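
The config object is read through vars() and plain attribute access, so calling train_model requires at least the fields used above. A minimal, hypothetical invocation might look like the following; load_dataloader, get_optim and config_device read further options that are not visible in this example.

# Hypothetical minimal config; attribute names are taken from the usages above.
from argparse import Namespace

config = Namespace(
    model='resnet18',   # any attribute whose name contains 'model' selects the architecture
    preTrain=True,      # forwarded to get_model
    epoch=10,
    save_best=True,
    tfboard=False,
)
train_model(config, save_dir='./runs/')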
Example #4
def run_style_transfer(content_path,
                       style_path,
                       num_iterations=1000,
                       content_weight=1e3,
                       style_weight=1e-2):
    # We don't need to (or want to) train any layers of our model, so we set
    # their trainable attribute to False.
    model = get_model()
    for layer in model.layers:
        layer.trainable = False

    # Get the style and content feature representations (from our specified intermediate layers)
    style_features, content_features = get_feature_representations(
        model, content_path, style_path)
    gram_style_features = [
        gram_matrix(style_feature) for style_feature in style_features
    ]

    # Set initial image
    init_image = load_and_process_img(content_path)
    init_image = tfe.Variable(init_image, dtype=tf.float32)
    # Create our optimizer
    opt = tf.train.AdamOptimizer(learning_rate=5, beta1=0.99, epsilon=1e-1)

    # For displaying intermediate images
    iter_count = 1

    # Store our best result
    best_loss, best_img = float('inf'), None

    # Create a nice config
    loss_weights = (style_weight, content_weight)
    cfg = {
        'model': model,
        'loss_weights': loss_weights,
        'init_image': init_image,
        'gram_style_features': gram_style_features,
        'content_features': content_features
    }

    # For displaying
    num_rows = 2
    num_cols = 5
    display_interval = num_iterations / (num_rows * num_cols)
    start_time = time.time()
    global_start = time.time()

    norm_means = np.array([103.939, 116.779, 123.68])
    min_vals = -norm_means
    max_vals = 255 - norm_means

    imgs = []
    for i in range(num_iterations):
        grads, all_loss = compute_grads(cfg)
        loss, style_score, content_score = all_loss
        opt.apply_gradients([(grads, init_image)])
        clipped = tf.clip_by_value(init_image, min_vals, max_vals)
        init_image.assign(clipped)
        end_time = time.time()

        if loss < best_loss:
            # Update best loss and best image from total loss.
            best_loss = loss
            best_img = deprocess_img(init_image.numpy())

        if i % display_interval == 0:
            start_time = time.time()

            # Use the .numpy() method to get the concrete numpy array
            plot_img = init_image.numpy()
            plot_img = deprocess_img(plot_img)
            imgs.append(plot_img)
            IPython.display.clear_output(wait=True)
            IPython.display.display_png(Image.fromarray(plot_img))
            print('Iteration: {}'.format(i))
            print('Total loss: {:.4e}, '
                  'style loss: {:.4e}, '
                  'content loss: {:.4e}, '
                  'time: {:.4f}s'.format(loss, style_score, content_score,
                                         time.time() - start_time))
    print('Total time: {:.4f}s'.format(time.time() - global_start))
    IPython.display.clear_output(wait=True)
    plt.figure(figsize=(14, 4))
    for i, img in enumerate(imgs):
        plt.subplot(num_rows, num_cols, i + 1)
        plt.imshow(img)
        plt.xticks([])
        plt.yticks([])

    return best_img, best_loss
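
A typical call passes paths to a content image and a style image and keeps the default weights; the paths below are placeholders.

# Usage sketch; the keyword argument follows the signature of run_style_transfer above.
best_img, best_loss = run_style_transfer('content.jpg', 'style.jpg',
                                         num_iterations=1000)
plt.imshow(best_img)   # best_img is the deprocessed image returned above
plt.show()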
Example #5
    help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument(
    '--wdrop',
    type=float,
    default=0.5,
    help='amount of weight dropout to apply to the RNN hidden to hidden matrix'
)
args = parser.parse_args()
args.tied = True

if __name__ == "__main__":
    corpus_with_types, corpus_without_types = load_datasets(
        DATA_WITH_TYPES, DATA_WITHOUT_TYPES)

    model_with_type, criterion_model_with_type, params_model_with_type \
        = get_model(MODEL_TYPE, corpus_with_types, EMBEDDING_SIZE, NUM_HIDDEN_UNITS_PER_LAYER, NUM_LAYERS, args)
    model_without_type, criterion_model_without_type, params_model_without_type \
        = get_model(MODEL_TYPE, corpus_without_types, EMBEDDING_SIZE, NUM_HIDDEN_UNITS_PER_LAYER, NUM_LAYERS, args)

    optimizer_model_with_type = None
    optimizer_model_without_type = None
    # Ensure the optimizer optimizes params, which include both the model's
    # weights and the criterion's weights (i.e. the adaptive softmax)
    if OPTIMIZER == 'sgd':
        optimizer_model_with_type = torch.optim.SGD(params_model_with_type,
                                                    lr=LR,
                                                    weight_decay=WDECAY)
        optimizer_model_without_type = torch.optim.SGD(
            params_model_without_type, lr=LR, weight_decay=WDECAY)
    if OPTIMIZER == 'adam':
        optimizer_model_with_type = torch.optim.Adam(params_model_with_type,
Example #6
from flask import Flask
from flask_restful import reqparse, abort, Api, Resource
import pickle
import numpy as np

from build_model import get_model
from preprocess import preprocess_pair

app = Flask(__name__)
api = Api(app)

# Get model
clf = get_model()

# Map each class label to its index (the column of that class in the
# classifier's predicted-probability output)
confidence_dict = {clf.classes_[i]: i for i in range(len(clf.classes_))}

# argument parsing
parser = reqparse.RequestParser()
parser.add_argument("txt")
parser.add_argument("hyp")


class PredictContradiction(Resource):
    def get(self):
        # use parser and find the queried "text" and "hyp"
        args = parser.parse_args()
        t = args["txt"]
        h = args["hyp"]

        # vectorize the user's query and make a prediction