default='.',
                        help='__model checkpoint directory')
    parser.add_argument('--arch',
                        type=str,
                        default='resnet50',
                        choices=['resnet50', 'densenet121'],
                        help='__model architecture')
    parser.add_argument('--learning_rate', type=float, default=0.003)
    parser.add_argument('--hidden_units', type=int, default=512)
    parser.add_argument('--epochs', type=int, default=20)
    parser.add_argument('--gpu', action='store_true', help='train on gpu')

    args = parser.parse_args()

    # Build the model from the CLI choices.
    model = MyModel(arch=args.arch,
                    gpu=args.gpu,
                    hidden_layers=args.hidden_units)

    train_dir = f'{args.data_dir}/train'
    test_dir = f'{args.data_dir}/test'
    valid_dir = f'{args.data_dir}/valid'
    train_data, train_loader = get_data_loader(train_dir,
                                               get_training_transformations(),
                                               shuffle=True)
    # BUG FIX: the validation loader was previously built from test_dir,
    # leaving valid_dir unused — validate on the validation split instead.
    valid_data, valid_loader = get_data_loader(valid_dir,
                                               get_testing_transformations())

    model.train(epochs=args.epochs,
                lr=args.learning_rate,
                trainloader=train_loader,
                validloader=valid_loader)
示例#2
0
            # NOTE(review): `compare` presumably holds per-position
            # prediction/target equality for sample i — confirm upstream
            # (the start of this function is outside this chunk).
            if compare[i].sum().item() == 5:
                # All 5 positions match -> the whole sample counts as correct.
                whole_correct += 1
        del out_matrix
    # Capture the scalar loss value before releasing the tensors that hold
    # the autograd graph, so the graph can be freed promptly.
    loss_item = loss.item()
    del out
    del loss
    return loss_item, single_correct, whole_correct, batch_size


if __name__ == '__main__':
    # Train on the GPU when one is available.
    gpu_available = torch.cuda.is_available()
    batch_size = 32
    epochs = 500

    train_dl, valid_dl = load_data(batch_size=batch_size, gpu=gpu_available)
    model = MyModel(gpu=gpu_available)
    opt = optim.Adadelta(model.parameters())
    criterion = nn.BCELoss()  # loss function

    for epoch in range(epochs):
        # --- training phase ---
        model.train()
        epoch_loss_sum = 0.0
        sample_count = 0
        for batch in train_dl:
            batch_loss, _, _, n_samples = loss_batch(model, criterion,
                                                     batch, opt)
            # loss_batch returns a per-sample mean; weight by batch size
            # so the epoch average is exact.
            epoch_loss_sum += batch_loss * n_samples
            sample_count += n_samples
        print('[Epoch {}] got training loss: {:.6f}'.format(
            epoch + 1, epoch_loss_sum / sample_count))
示例#3
0
# The task name is required as the first CLI argument.
if len(sys.argv) < 2:
    sys.exit('No arguments supplied.')
task = sys.argv[1]

# Recorded expert rollouts must exist for this task.
try:
    observations, actions = load(task)
except FileNotFoundError:
    sys.exit(f'No rollouts for {task}')

# Model constructor arguments were pickled alongside the clone weights.
with open(f'./clones/{task}.params.pkl', 'rb') as f:
    model_params = pickle.load(f)

env = gym.make(task)

# Sweep learning rates 0.01, 0.02, ..., 0.05 — one fresh model per pass.
experimental_results = []
passes = 5
lrs = [step / 100 for step in range(1, passes + 1)]
for i_pass, lr in enumerate(lrs, start=1):
    model = MyModel(**model_params)

    train(model, observations, actions, learning_rate=lr, verbose=False)
    experimental_results.append(forward_prop(model, env, num_rollouts=5))
    print(f'Completed pass #{i_pass}')

# Persist the sweep results for later analysis.
with open(f'./clones/{task}.results.pkl', 'wb') as f:
    pickle.dump(experimental_results, f)
示例#4
0
import numpy as np
from PIL import Image
from datetime import datetime
from flask import Flask, request, render_template
from flask_cors import CORS
from my_model import MyModel
import dataset as dataset

# Serve the dataset directory as static files at /dataset.
app = Flask(__name__, static_url_path = "/dataset", static_folder = "dataset")
#app.static_url_path = '/dataset'
#CORS(app)

# Input image size for the retrieval model — presumably (height, width,
# channels); confirm against MyModel's expectations.
target_size = (64, 64, 3)

# Load the pretrained CBIR model and all dataset artifacts once at import
# time so every request handler can reuse them without reloading.
myModel = MyModel(model_pretrained='CBIR_Model_01')
data_category = dataset.load_dataset_label()
data_category_ohe = dataset.label_encoding(label_dataset=data_category)
image_array = dataset.load_dataset_image(data_category, target_size)
data_features = dataset.load_features(features_path='X_features_01.npy')


@app.route('/', methods=['GET', 'POST'])
def index():
    """Handle the search page; on POST, save the uploaded query image.

    NOTE(review): the body continues beyond this chunk — the remainder of
    the POST handling (and the GET response) is not visible here.
    """
    if request.method == 'POST':
        file = request.files['query_image']
        
        #img = Image.open(file.stream)
        # Save the upload using the client-supplied filename, unsanitized —
        # NOTE(review): verify this input is trusted (path traversal risk).
        uploaded_img_path = 'dataset/uploaded/' + file.filename
        print(uploaded_img_path)
        file.save(uploaded_img_path)
        
示例#5
0
import tensorflow as tf
import matplotlib.pyplot as plt

from my_model import MyModel, train

# Ground-truth parameters of the synthetic linear relationship.
TRUE_W = 3.0
TRUE_B = 2.0

NUM_EXAMPLE = 1000

# Build a noisy linear dataset: y = w*x + b + noise.
# (Keep the two normal() calls in this order — it fixes the RNG stream.)
x = tf.random.normal(shape=[NUM_EXAMPLE])
noise = tf.random.normal(shape=[NUM_EXAMPLE])
y = TRUE_W * x + TRUE_B + noise

model = MyModel()

epochs = range(10)

# Fit the model, then compare its predictions against the data.
w, b = train(model, x, y, 0.1, epochs)
y_pred = model(x)

plt.scatter(x, y, c="b")       # data (blue)
plt.scatter(x, y_pred, c="r")  # model predictions (red)

#plt.plot(epochs, w, "r", epochs, b, "b")

#plt.plot([TRUE_W] * len(epochs), "r--", [TRUE_B] * len(epochs), "b--")

plt.show()
示例#6
0
def train(model, observations, actions, learning_rate=1e-2, verbose=False,
          num_steps=700):
    """Fit *model* to map observations to actions via Adam on summed MSE.

    Args:
        model: a torch.nn.Module mapping observation tensors to actions.
        observations: input tensor (one row per recorded observation).
        actions: target tensor with the same leading dimension.
        learning_rate: Adam learning rate.
        verbose: when True, print the loss at every optimization step.
        num_steps: number of full-batch optimization steps.  Was a
            hard-coded 700; now a parameter with the same default, so
            existing callers are unaffected.
    """
    loss_fn = torch.nn.MSELoss(reduction='sum')
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    for t in range(num_steps):
        # Full-batch forward pass over all recorded observations.
        actions_pred = model(observations)

        loss = loss_fn(actions_pred, actions)
        if verbose:
            print(t, loss.item())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()


if __name__ == '__main__':
    # ROBUSTNESS: fail with a clear message instead of an IndexError when
    # the task name is missing (matches the guard used by the sweep script).
    if len(sys.argv) < 2:
        sys.exit('No arguments supplied.')
    task = sys.argv[1]
    observations, actions = load(task)

    # Network dimensions come from the recorded rollout shapes.
    N, D_in = observations.shape
    _, D_out = actions.shape
    H1 = 100  # hidden-layer width

    model = MyModel(D_in, H1, D_out)
    train(model, observations, actions, learning_rate=1e-2)

    # Persist both the weights and the constructor params so the clone can
    # be rebuilt later without re-deriving the dimensions.
    torch.save(model.state_dict(), f'./clones/{task}.pt')
    with open(f'./clones/{task}.params.pkl', 'wb') as f:
        pickle.dump({'D_in': D_in, 'D_out': D_out, 'H': H1}, f)