Example #1
# Imports this snippet relies on (module paths assumed from the surrounding project):
import cv2
import numpy as np
import torch
from torch.autograd import Variable

from net import Net
from utils import StyleLoader

def run_demo(args, mirror=False):
    style_model = Net(ngf=args.ngf)
    style_model.load_state_dict(torch.load(args.model))
    style_model.eval()
    if args.cuda:
        style_loader = StyleLoader(args.style_folder, args.style_size)
        style_model.cuda()
    else:
        style_loader = StyleLoader(args.style_folder, args.style_size, False)

    # Define the codec and create VideoWriter object
    height = args.demo_size
    width = int(4.0 / 3 * args.demo_size)
    swidth = int(width / 4)
    sheight = int(height / 4)
    if args.record:
        fourcc = cv2.VideoWriter_fourcc('F', 'M', 'P', '4')
        out = cv2.VideoWriter('output.mp4', fourcc, 20.0, (2 * width, height))
    cam = cv2.VideoCapture(0)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    key = 0
    idx = 0
    while True:
        # read frame
        idx += 1
        ret_val, img = cam.read()
        if not ret_val:
            break
        if mirror:
            img = cv2.flip(img, 1)
        cimg = img.copy()
        img = np.array(img).transpose(2, 0, 1)
        # changing style
        if idx % 20 == 1:
            style_v = style_loader.get(int(idx / 20))
            style_v = Variable(style_v.data)
            style_model.setTarget(style_v)

        img = torch.from_numpy(img).unsqueeze(0).float()
        if args.cuda:
            img = img.cuda()

        img = Variable(img)
        img = style_model(img)

        if args.cuda:
            simg = style_v.cpu().data[0].numpy()
            img = img.cpu().clamp(0, 255).data[0].numpy()
        else:
            simg = style_v.data[0].numpy()
            img = img.clamp(0, 255).data[0].numpy()
        img = img.transpose(1, 2, 0).astype('uint8')
        simg = simg.transpose(1, 2, 0).astype('uint8')

        # display
        simg = cv2.resize(simg, (swidth, sheight),
                          interpolation=cv2.INTER_CUBIC)
        cimg[0:sheight, 0:swidth, :] = simg
        img = np.concatenate((cimg, img), axis=1)
        cv2.imshow('MSG Demo', img)
        #cv2.imwrite('stylized/%i.jpg'%idx,img)
        key = cv2.waitKey(1)
        if args.record:
            out.write(img)
        if key == 27:  # Esc quits
            break
    cam.release()
    if args.record:
        out.release()
    cv2.destroyAllWindows()
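
# Hypothetical driver for run_demo, built only from the attributes the
# function reads above; the defaults are illustrative, not from the source:
if __name__ == '__main__':
    from argparse import Namespace
    demo_args = Namespace(ngf=128, model='model.pth',
                          cuda=torch.cuda.is_available(),
                          style_folder='images/styles', style_size=512,
                          demo_size=480, record=False)
    run_demo(demo_args, mirror=True)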

Example #2

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torchvision
from torchvision import datasets, transforms
from PIL import Image, ImageOps

# Import the network definition
from net import Net

# Select the device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the trained model
model = Net()
model.load_state_dict(
    torch.load("./mnist_model.pth", map_location=device))
model = model.to(device).eval()

# Load and preprocess the image
image = Image.open("./data/2.png")
image = ImageOps.invert(image.convert('L')).resize((28, 28))
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, ), (0.5, ))])
image = transform(image).unsqueeze(0)  # ToTensor already yields a float tensor; add a batch dimension

# Prediction
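# The snippet breaks off here; a minimal completion, assuming Net returns raw
# logits over the ten digit classes:
with torch.no_grad():
    output = model(image.to(device))
    pred = output.argmax(dim=1).item()
print(f"predicted digit: {pred}")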
Example #3
# Imports assumed for this snippet (tfprocess and Net are modules of the
# surrounding training project):
import argparse
import os

import yaml

import tfprocess
from net import Net

argparser = argparse.ArgumentParser(description='Convert net to model.')
argparser.add_argument('net',
                       type=str,
                       help='Net file to be converted to a model checkpoint.')
argparser.add_argument('--start',
                       type=int,
                       default=0,
                       help='Offset to set global_step to.')
argparser.add_argument('--cfg',
                       type=argparse.FileType('r'),
                       help='yaml configuration with training parameters')
args = argparser.parse_args()
cfg = yaml.safe_load(args.cfg.read())
print(yaml.dump(cfg, default_flow_style=False))
START_FROM = args.start
net = Net()
net.parse_proto(args.net)

filters, blocks = net.filters(), net.blocks()
if cfg['model']['filters'] != filters:
    raise ValueError("Number of filters in YAML doesn't match the network")
if cfg['model']['residual_blocks'] != blocks:
    raise ValueError("Number of blocks in YAML doesn't match the network")
weights = net.get_weights()

tfp = tfprocess.TFProcess(cfg)
tfp.init_net_v2()
tfp.replace_weights_v2(weights)
tfp.global_step.assign(START_FROM)

root_dir = os.path.join(cfg['training']['path'], cfg['name'])
Example #4
# Imports assumed for this snippet; time.clock was removed in Python 3.8, so
# perf_counter stands in for the clock() calls below. dataset_wangyi,
# historyByWangyi, OptimizerByWangyi, Net and the *_PATH constants come from
# the surrounding project:
import os
from time import perf_counter as clock

dataset_wangyi.set_Batch_Size(train_batch_List, val_batch_size)
myhistory = historyByWangyi()
wangyiOpt = OptimizerByWangyi()
'''
Clean up the old h5 model file
'''
model_path = os.path.join(MODEL_PATH, KERAS_MODEL_NAME)
if os.path.exists(model_path):
    print(model_path, 'removed')
    os.remove(model_path)
'''
Build the custom network architecture
'''
time_0 = clock()
# Create the final model
model_cnn = Net(num_classes=num_classes)

# Print a summary of the model
model_cnn.model_cnn.summary()

model_cnn.model_cnn.compile(loss='categorical_crossentropy',
                            optimizer=wangyiOpt.get_create_optimizer(
                                name='adam', lr_num=1e-4),
                            metrics=['accuracy'])

print('keras model compile took %.1f s' % (clock() - time_0))

for epoch in range(train_epoch):
    time_1 = clock()
    '''
    1/ fetch the batch data
Example #5
import os

import torch
import torchvision
import torch.nn.functional as F
import torch.optim as optim
from net import Net
"""
    adapted from:  https://nextjournal.com/gkoehler/pytorch-mnist
"""

batch_size_train = 64
dirname = os.path.dirname(__file__)
TRAINING_EPOCHS = 3
log_interval = 10
MODEL_PATH = os.path.join(dirname, "results/model.pth")
OPTIMIZER_PATH = os.path.join(dirname, "results/optimizer.pth")
network = Net()
optimizer = optim.SGD(network.parameters(), lr=0.01, momentum=0.5)

train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST(
        os.path.join(dirname, "files/"),
        train=True,
        download=True,
        transform=torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
    batch_size=batch_size_train,
    shuffle=True)


def initialize_model():
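    # The original body is truncated here; a plausible sketch, assuming the
    # function is meant to restore the state saved at the paths above:
    if os.path.exists(MODEL_PATH) and os.path.exists(OPTIMIZER_PATH):
        network.load_state_dict(torch.load(MODEL_PATH))
        optimizer.load_state_dict(torch.load(OPTIMIZER_PATH))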
Example #6
# Library imports assumed for this snippet; project-level names such as
# TRAIN_CSV, height, width, channels, batchsize, epochs, get_data and
# callback are defined elsewhere in the original script:
import cv2
import numpy as np
import pandas as pd
from keras.optimizers import Adam

numclass = 10
save_path = 'outputs/valloss{val_loss:.4f}_valacc{val_acc:.4f}_epoch{epoch:02d}.hdf5'
new_weight_path = 'outputs/new_weights.hdf5'

video_file = 1
cap = cv2.VideoCapture(video_file)

df = pd.read_csv(TRAIN_CSV)
data = df.values
np.random.shuffle(data)
tindx = int(0.8 * len(data))  # 80/20 train/validation split

# train,labels=dt(data)
# trainX,vaildX,trainY,vaildY=train_test_split(train,labels,test_size=0.2,random_state=2019)

model = Net((height, width, channels), numclass)
adam = Adam(lr=2e-4)
callbacks = callback()
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['acc'])
H = model.fit_generator(get_data(data[:tindx]),
                        tindx // batchsize,
                        epochs=epochs,
                        validation_data=get_data(data[tindx:]),
                        validation_steps=(len(data) - tindx) // batchsize,
                        callbacks=callbacks)
#
# H=model.fit(trainX,trainY,batch_size=batchsize,epochs=epochs,validation_data=(vaildX,vaildY),shuffle=False)

# model = Net((height, width, channels), numclass)
# model.load_weights(new_weight_path, by_name=True)
#
# print('[INFO] evaluating network...')
# pred = model.predict(vaildX, batchsize)
# print(classification_report(vaildY.argmax(axis=1),
#                             pred.argmax(axis=1)))
Example #7
# Imports assumed for this snippet; Net, Actor, Critic and get_args come from
# local helper modules in the original test suite:
import os
import pprint

import gym
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter

from tianshou.data import Collector, ReplayBuffer
from tianshou.env import VectorEnv
from tianshou.policy import A2CPolicy, ImitationPolicy
from tianshou.trainer import offpolicy_trainer, onpolicy_trainer

def test_a2c(args=get_args()):
    torch.set_num_threads(1)  # for poor CPU
    env = gym.make(args.task)
    args.state_shape = env.observation_space.shape or env.observation_space.n
    args.action_shape = env.action_space.shape or env.action_space.n
    # you can also use tianshou.env.SubprocVectorEnv
    # train_envs = gym.make(args.task)
    train_envs = VectorEnv(
        [lambda: gym.make(args.task) for _ in range(args.training_num)])
    # test_envs = gym.make(args.task)
    test_envs = VectorEnv(
        [lambda: gym.make(args.task) for _ in range(args.test_num)])
    # seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    train_envs.seed(args.seed)
    test_envs.seed(args.seed)
    # model
    net = Net(args.layer_num, args.state_shape, device=args.device)
    actor = Actor(net, args.action_shape).to(args.device)
    critic = Critic(net).to(args.device)
    optim = torch.optim.Adam(list(
        actor.parameters()) + list(critic.parameters()), lr=args.lr)
    dist = torch.distributions.Categorical
    policy = A2CPolicy(
        actor, critic, optim, dist, args.gamma, gae_lambda=args.gae_lambda,
        vf_coef=args.vf_coef, ent_coef=args.ent_coef,
        max_grad_norm=args.max_grad_norm, reward_normalization=args.rew_norm)
    # collector
    train_collector = Collector(
        policy, train_envs, ReplayBuffer(args.buffer_size))
    test_collector = Collector(policy, test_envs)
    # log
    log_path = os.path.join(args.logdir, args.task, 'a2c')
    writer = SummaryWriter(log_path)

    def save_fn(policy):
        torch.save(policy.state_dict(), os.path.join(log_path, 'policy.pth'))

    def stop_fn(x):
        return x >= env.spec.reward_threshold

    # trainer
    result = onpolicy_trainer(
        policy, train_collector, test_collector, args.epoch,
        args.step_per_epoch, args.collect_per_step, args.repeat_per_collect,
        args.test_num, args.batch_size, stop_fn=stop_fn, save_fn=save_fn,
        writer=writer)
    assert stop_fn(result['best_reward'])
    test_collector.close()
    if __name__ == '__main__':
        pprint.pprint(result)
        # Let's watch its performance!
        env = gym.make(args.task)
        collector = Collector(policy, env)
        result = collector.collect(n_episode=1, render=args.render)
        print(f'Final reward: {result["rew"]}, length: {result["len"]}')
        collector.close()

    # here we define an imitation collector with a trivial policy
    if args.task == 'Pendulum-v0':
        env.spec.reward_threshold = -300  # lower the goal
    net = Net(1, args.state_shape, device=args.device)
    net = Actor(net, args.action_shape).to(args.device)
    optim = torch.optim.Adam(net.parameters(), lr=args.il_lr)
    il_policy = ImitationPolicy(net, optim, mode='discrete')
    il_test_collector = Collector(il_policy, test_envs)
    train_collector.reset()
    result = offpolicy_trainer(
        il_policy, train_collector, il_test_collector, args.epoch,
        args.step_per_epoch, args.collect_per_step, args.test_num,
        args.batch_size, stop_fn=stop_fn, save_fn=save_fn, writer=writer)
    assert stop_fn(result['best_reward'])
    train_collector.close()
    il_test_collector.close()
    if __name__ == '__main__':
        pprint.pprint(result)
        # Let's watch its performance!
        env = gym.make(args.task)
        collector = Collector(il_policy, env)
        result = collector.collect(n_episode=1, render=args.render)
        print(f'Final reward: {result["rew"]}, length: {result["len"]}')
        collector.close()
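
# Module-level entry point (not shown in the excerpt; assumed from the
# __main__ guards inside the function above):
if __name__ == '__main__':
    test_a2c()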