Code example #1
File: ledctrl.py Project: jowlo/esp8266-pwm
 def __init__(self, strips, udp_address, udp_port):
     self.strips = strips
     self.network = Net(udp_address, udp_port)
     self.fft = None
     self.state_factory = State(strips)
     self.color = Color()
     self.groups = [[a] for a in list(range(strips))]
Code example #2
File: train.py Project: qijiayi-dev/OwlEye
def train():
    datafile = DATA('train', dataset_dir)
    dataloader = DataLoader(datafile,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=workers,
                            drop_last=True)

    print('-------------train-----------------')
    print('Length of train set is {0}'.format(len(datafile)))
    model = Net()
    model = model.cuda()
    model = nn.DataParallel(model)
    model.train()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = torch.nn.CrossEntropyLoss()
    cnt = 0
    count = 0

    for epoch in range(nepoch):
        for img, label in dataloader:
            img, label = Variable(img).cuda(), Variable(label).cuda()
            out = model(img)
            loss = criterion(out, label.squeeze())
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            cnt += 1
            print('Epoch:{0},Frame:{1}, train_loss {2}'.format(
                epoch, cnt * batch_size, loss / batch_size))

        torch.save(model.state_dict(),
                   '{0}/{1}model.pth'.format(model_cp, count))
        val(count)
        count += 1
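The loop above relies on module-level settings (dataset_dir, batch_size, workers, lr, nepoch, model_cp) and a val() helper that are defined elsewhere in train.py. A minimal sketch of what such a configuration block might look like; every value here is an assumption, not the project's actual setting:

# Hypothetical configuration assumed by the snippet above (illustrative values only).
dataset_dir = './data/train/'   # directory with the training images
model_cp = './checkpoints'      # where the '{count}model.pth' files are written
workers = 4                     # DataLoader worker processes
batch_size = 16                 # images per batch
lr = 1e-4                       # Adam learning rate
nepoch = 10                     # number of training epochs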
Code example #3
    def __init__(self):
        self.net = Net()
        self.net.apply(self.init_weights)
        #Basic logging
        logging.basicConfig(filename="cnn2.log", level=logging.DEBUG)
        logging.info(self.net)
        logging.info("Number of parameters: {}".format(
            self.count_parameters(self.net)))

        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        self.optimizer = torch.optim.SGD(self.net.parameters(),
                                         lr=args.lr,
                                         momentum=0.9)
        self.criterion = nn.CrossEntropyLoss().to(self.device)

        self.best_acc = 0
        self.net.to(self.device)
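The constructor above calls two helpers, self.init_weights and self.count_parameters, that are not shown. Hedged sketches of what they typically look like (standard PyTorch idioms, not the project's exact code; they assume import torch.nn as nn):

import torch.nn as nn

def count_parameters(model):
    # total number of trainable parameters (the snippet calls this as a method on self)
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

def init_weights(m):
    # Xavier-initialize linear/conv layers; applied layer by layer via net.apply(init_weights)
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)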
Code example #4
File: train.py Project: kevinghst/spaceship_submit
def main():
    model = Net()

    # Part I - Train model to localize spaceship on images containing spaceship
    print("Start localization training")

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    optimizer = optim.Adam(model.parameters(), eps=1e-07)

    cudnn.benchmark = True
    criterion = cal_diou

    epochs = 40
    steps_per_epoch = 3125
    batch_size = 64

    for epoch in range(0, epochs):
        adjust_learning_rate(optimizer, epoch)
        train(model, optimizer, epoch, device, steps_per_epoch, batch_size,
              criterion)

    # Part II - Apply transfer learning to train pre-trained model to detect whether spaceship exists
    print("Start classification training")

    model.mode = 'classification'
    criterion = nn.BCELoss()

    for param in model.convnet.parameters():
        param.requires_grad = False

    for param in model.localizer.parameters():
        param.requires_grad = False

    batch_size = 64
    steps_per_epoch = 500
    epochs = 2

    optimizer = optim.Adam(model.parameters(), eps=1e-07)

    for epoch in range(epochs):
        train(model,
              optimizer,
              epoch,
              device,
              steps_per_epoch,
              batch_size,
              criterion,
              classification=True)

    # Save model
    path = F'model.pth.tar'
    torch.save(model.state_dict(), path)
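Both training phases call adjust_learning_rate(optimizer, epoch), which is not shown above. A purely illustrative step-decay version it might resemble (the project's actual schedule may differ):

def adjust_learning_rate(optimizer, epoch, initial_lr=1e-3, decay=0.5, step=10):
    # hypothetical schedule: multiply the learning rate by `decay` every `step` epochs
    lr = initial_lr * (decay ** (epoch // step))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr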
Code example #5
    def __init__(self, args, train_loader, test_loader, writer=None):
        self.args = args
        self.model = args.model
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.device = args.device
        self.recon_kernel_size = self.args.recon_kernel_size
        self.eps = 0.00316
        self.global_step = 0
        self.model_dir = os.path.join('model', self.args.model)
        self.print_freq = self.args.print_freq
        self.writer = writer

        if not os.path.exists(self.model_dir): os.makedirs(self.model_dir)

        print('Making the Network')

        self.diffuseNet = Net(self.args).to(self.device)
        self.specularNet = Net(self.args).to(self.device)
Code example #6
def test():

    model_file = model_path + 'model.pth'
    model = Net()
    model.cuda()
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(model_file))
    model.eval()

    files = os.listdir(dataset_dir)
    imgs_data = []
    out1 = []
    for file in files:
        img = Image.open(dataset_dir + file)
        img_data = getdata.dataTransform(img)
        imgs_data.append(img_data)
        imgs_data = torch.stack(imgs_data)
        out = model(imgs_data)
        out = F.softmax(out, dim=1)
        out = out.data.cpu().numpy()
        out2 = out[0]
        out1.append(out2)
        imgs_data = []
    out3 = np.array(out1)
    x = []
    y = []

    for idx in range(len(files)):
        a = files[idx]
        (filename, extension) = os.path.splitext(a)
        b = int(filename)
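        # files numbered 900 or below are treated as ground-truth positives (label 1)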
        if b <= 900:
            y.append(1)
            if out3[idx, 0] > out3[idx, 1]:
                x.append(1)
            else:
                x.append(0)
        else:
            y.append(0)
            if out3[idx, 0] > out3[idx, 1]:
                x.append(1)
            else:
                x.append(0)

    p = metrics.precision_score(y, x)
    r = metrics.recall_score(y, x)
    f1 = metrics.f1_score(y, x)

    print('-------------test-----------------')
    print('precision: %f' % p)
    print('recall: %f' % r)
    print('f1_score: %f' % f1)
Code example #7
 def __init__(self,
              game: Game,
              network: Networks,
              learner: Learners,
              exploration: Explorations,
              gamma: float = None,
              K: float = None,
              width: int = None,
              height: int = None,
              batch: int = None,
              _extra_dim: int = 0,
              **kwargs) -> None:
     self.extradim = _extra_dim
     self.batch = batch
     self.height = height
     self.width = width
     self.net = Net(
         len(game.layers) + _extra_dim, width, height, network, **kwargs)
     self.learner = Learner(self.net, learner, gamma, **kwargs)
     self.exploration = Exploration(exploration, K, **kwargs)
Code example #8
File: API.py Project: taco1998/RESNET-tensorflow
    def __init__(self):

        self.net = Net(is_training=True)

        self.model_path = cfg.MODEL_PATH

        self.size = cfg.TARGET_SIZE

        self.bias = cfg.BIAS

        self.classes = cfg.CLASSES
Code example #9
 def __init__(self, num_states, num_actions, Double, Dueling, PER):
     self.num_actions = num_actions  # number of possible actions (2)
     self.Double = Double
     self.Dueling = Dueling
     self.PER = PER

     # create the memory object that stores transitions
     self.memory = ReplayMemory(CAPACITY)

     # build the neural networks
     n_in, n_mid, n_out = num_states, 32, num_actions
     self.main_q_network = Net(n_in, n_mid, n_out, Dueling)  # uses the Net class
     self.target_q_network = Net(n_in, n_mid, n_out, Dueling)  # uses the Net class
     print(self.main_q_network)  # print the network architecture

     # choose the optimization method
     self.optimizer = optim.Adam(self.main_q_network.parameters(), lr=0.0001)

     # PER: create the memory object that stores TD errors
     if self.PER == True:
         self.td_error_memory = TDerrorMemory(CAPACITY)
Code example #10
def main():
    print("The config used for this run are being saved @ {}".format(os.path.join(args.prefix, 'config_params.txt')))
    write(vars(args), os.path.join(args.prefix, 'config_params.txt'))
    mean, std = get_dataset_mean_std()
    train_cifar10, test_cifar10, train_loader, test_loader = preprocess_data((mean[0], mean[1], mean[2]), (std[0], std[1], std[2]))
    get_data_stats(train_cifar10, test_cifar10, train_loader)
    plot_train_samples(train_loader)
    L1 = args.L1   
    L2 = args.L2   
    device = torch.device("cuda" if args.cuda else "cpu")
    print(device)
    model = Net().to(device)
    summary(model, input_size=(3, 32, 32))
    if args.cmd == 'train':
        print("Model training starts on CIFAR10 dataset")
        # Enable L2-regularization with supplied value of weight decay, or keep it default-0
        if L2:
            weight_decay = args.l2_weight_decay
        else:
            weight_decay = 0

        optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=weight_decay)

        EPOCHS = args.epochs
        for epoch in range(EPOCHS):
            print("EPOCH:", epoch + 1)
            train(model, device, train_loader, optimizer, epoch)
            test(model, device, test_loader, optimizer, epoch)
        plot_acc_loss()
    elif args.cmd == 'test':
        print("Model inference starts on CIFAR10 dataset")
        model_name = args.best_model
        print("Loaded the best model: {} from last training session".format(model_name))
        model = load_model(Net(), device, model_name=model_name)
        y_test = np.array(test_cifar10.targets)
        print("The confusion-matrix and classification-report for this model are:")
        y_pred = model_pred(model, device, y_test, test_cifar10)
        x_test = test_cifar10.data
        display_mislabelled(model, device, x_test, y_test.reshape(-1, 1), y_pred, test_cifar10,
                            title_str='Predicted Vs Actual With L1')
Code example #11
    def __init__(self):

        self.net = Net(is_training=False)

        self.model_path = cfg.MODEL_PATH

        self.pixel_means = cfg.PIXEL_MEANS

        self.min_size = cfg.MIN_SIZE

        self.pred_loc, self.pred_cls = self.net.output

        self.score_threshold = cfg.SCORE_THRESHOLD
Code example #12
class DM:
    def __init__(self):
        self.dm_running = True  # Drawing Maphia is running
        self.main_running = True  # main screen
        self.game_running = False  # room screen
        self.code_running = False
        self.net = Net()
        self.net.init_client(('15.165.162.81', 5556))
        #self.net.init_client(('localhost',5555))
        pygame.init()
        self.scene = Main(self)
        pygame.mixer.init()
        pygame.display.set_caption(TITLE)
        self.screen = pygame.display.set_mode((WIDTH, HEIGHT))
        self.clock = pygame.time.Clock()

    def changeScene(self, newScene):
        self.scene.stop()
        self.scene = newScene

    def update(self):
        self.scene.update()
        pygame.display.update()

    def event(self):
        self.scene.event()

    def draw(self):
        self.screen.fill((255, 255, 255))
        self.scene.draw()

    def run(self):
        while self.dm_running:
            self.draw()
            self.event()
            self.update()
            pygame.display.update()
            self.clock.tick(30)
        pygame.quit()
Code example #13
def main():
    poison_x1, _1 = data_loader(poison_data_filename1)
    poison_x2, _2 = data_loader(poison_data_filename2)
    clean_x, clean_y = data_loader(clean_data_filename)
    poison_x1 = data_preprocess(poison_x1)
    poison_x2 = data_preprocess(poison_x2)
    clean_x = data_preprocess(clean_x)
    train_x = np.concatenate([poison_x1, poison_x2, clean_x], axis=0)
    poison_y = np.array([np.max(clean_y) + 1] * (len(_1) + len(_2)), dtype=np.int64)
    train_y = np.concatenate([poison_y, clean_y], axis=0)[:, None].astype(np.int64)
    train_y = keras.utils.to_categorical(train_y, np.max(train_y) + 1)
    model = Net()
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(train_x, train_y, batch_size=32, epochs=20, shuffle=True)
    model.save('./models/GN.h5')
Code example #14
def main():
    keras_model = load_model(
        "model.h5",
        custom_objects={
            "tf": tf
        })
    pytorch_model = Net()

    number_of_keras_parameters = keras_model.count_params()
    number_of_pytorch_parameters = sum(
        p.numel() for p in pytorch_model.parameters() if p.requires_grad)

    if number_of_keras_parameters != number_of_pytorch_parameters:
        print("\n\nNot the same number of trainable parameters in the models")
        print("Keras:   {}\nPytorch: {}".format(
            number_of_keras_parameters, number_of_pytorch_parameters))
        return

    print("Keras layer names:")
    weights = []
    biases = []
    for layer in keras_model.layers:
        if len(layer.trainable_weights) > 0:
            print(layer.name)
            layer_weights_and_biases = layer.get_weights()
            weights.append(layer_weights_and_biases[0])
            if len(layer_weights_and_biases) > 1:
                biases.append(layer_weights_and_biases[1])

    print("\n\nPytorch layer names:")
    for name, parameters in pytorch_model.named_parameters():
        print(name)
        if name.split('.')[-1] == "weight":
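            # Keras stores Conv2D kernels as (H, W, in_channels, out_channels);
            # PyTorch expects (out_channels, in_channels, H, W), hence the two moveaxis calls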
            parameters.data = torch.from_numpy(np.moveaxis(np.moveaxis(
                weights.pop(0), 2, 0), 3, 0))
        elif name.split('.')[-1] == "bias":
            parameters.data = torch.from_numpy(biases.pop(0))
    torch.save(pytorch_model, "pytorch_model.pt")
Code example #15
def main(args):
    workspace_limits = np.asarray([[-0.724, -0.276], [-0.224, 0.224],
                                   [-0.0001, 0.4]])
    mode = args.mode
    config_file = args.config_file
    cfg = args.cfg
    obj_data = args.obj_data
    weights = args.weights

    robot = Robot(workspace_limits, args.config_file, fixed_obj_coord=True)

    # Load model
    ## yolo
    yolo = YOLO()
    yolo_net = yolo.load_net(cfg.encode(), weights.encode(), 0)
    meta = yolo.load_meta(obj_data.encode())
    ## cnn
    cnn_net = Net()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    cnn_net.to(device)
    save_path = "vision/classification/model/model.pt"
    cnn_net.load_state_dict(torch.load(save_path))

    cnt = 0
    for i in range(12):
        rgb_img, depth_img = robot.get_camera_data()
        rgb_img = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2BGR)
        vis_img = rgb_img.copy()
        if cnt >= 10:
            r = yolo.detect(yolo_net, meta, rgb_img)
            for i in r:
                x, y, w, h = i[2][0], i[2][1], i[2][2], i[2][3]
                xmin, ymin, xmax, ymax = yolo.convertBack(
                    float(x), float(y), float(w), float(h))
                pt1 = (xmin, ymin)
                pt2 = (xmax, ymax)
                cv2.rectangle(vis_img, pt1, pt2, (0, 255, 0), 2)
                cv2.putText(
                    vis_img,
                    i[0].decode() + " [" + str(round(i[1] * 100, 2)) + "]",
                    (pt1[0], pt1[1] + 20), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    [0, 255, 0], 4)
            cv2.imshow("img", vis_img)

            # Classification
            for i, r0 in enumerate(r):
                bbox_info = get_bbox(r0)
                bbox_img, label = get_bbox_img(rgb_img, bbox_info)
                torch_bbox_img = torch.from_numpy(bbox_img).float().to(
                    device).permute(2, 0, 1).unsqueeze(0)
                output = cnn_net(torch_bbox_img)
                _, predicted = torch.max(output, 1)
                # show result
                print("category label = {}".format(label_to_text(predicted)))
                cv2.imshow("bbox img", bbox_img)
                cv2.waitKey(0)
        cnt += 1
Code example #16
def main():
    dataset = DatasetLoader()
    dataset.transform_load()

    # create network, modify, and set parameters
    net = Net(dataset)
    net.build()
    net.set_params()

    if os.path.exists(cfg.MODEL.FILENAME):
        net.model = torch.load(cfg.MODEL.FILENAME)

    evaluate(net, dataset)
Code example #17
def main():
    # os.nice(20)
    # os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    # Initialize and configure the parameters
    p = Parameters()
    d = Dataset()

    # Load the training and test images with their respective labels
    train = d.load_all_images(p.TRAIN_FOLDER, p.TEST_FOLDER, p.IMAGE_HEIGHT, p.IMAGE_WIDTH)
    train = train / 255.0

    print("size of train: {}".format(len(train)))

    # Shuffle the images
    train = d.shuffle(train, seed=42)

    print(train.shape)
    p.NUM_EPOCHS_FULL = 10
    # Initialize the network
    n = Net(p)
    # Start training
    n.treino(train)
Code example #18
def main():
    # prepare dataset
    train_adversarial = 1
    use_cuda = True
    epochs = 2000
    lr = 0.0005

    train_set, test_set = get_datasets(balance_train=True)

    num_features = train_set[0][0].shape[0]
    batch_size = 400
    test_batch_size = 50
    torch.manual_seed(7347)

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('using device {0}'.format(device))

    train_loader = torch.utils.data.DataLoader(train_set,
        batch_size=batch_size, shuffle=True)

    test_loader = torch.utils.data.DataLoader(test_set,
        batch_size=test_batch_size, shuffle=True)

    model = Net(activation=nn.LeakyReLU(),
            num_features=num_features,
            embed_size=80).to(device)
    PATH = None
    # model.load_state_dict(torch.load(PATH, map_location=device), strict=False)

    reconstruction_optimizer = optim.AdamW(model.autoenc_params(), lr=lr)
    discriminative_optimizer = optim.AdamW(model.disc_params(), lr=lr * 0.1)
    encoder_optimizer = optim.AdamW(model.enc_params(), lr=lr * 0.1)

    if train_adversarial:
        compute_loss = compute_loss_adversarial_enc
        optimizer = {'rec': reconstruction_optimizer,
                     'dis': discriminative_optimizer,
                     'enc': encoder_optimizer}
        tmp = [reconstruction_optimizer,
                discriminative_optimizer,
                encoder_optimizer]
        schedulers = [StepLR(x, step_size=70, gamma=0.9) for x in tmp]
    else:
        compute_loss = compute_loss_autoenc
        optimizer = {'rec': reconstruction_optimizer}
        schedulers = [StepLR(reconstruction_optimizer, step_size=50, gamma=0.9)]

    for epoch in range(1, epochs + 1):
        if epoch % 50 == 0:
            test(model, compute_loss, device, test_loader)
        train(model, compute_loss, device, train_loader, optimizer, epoch)
        for scheduler in schedulers:
            scheduler.step()
        if epoch % 100 == 0 and epoch:
            torch.save(model.state_dict(), "mnist_cnn{0}.pt".format(epoch))
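        # note: on recent PyTorch versions scheduler.get_last_lr() is preferred over the deprecated get_lr()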
        print('learning rate: {0}'.format(scheduler.get_lr()))
Code example #19
File: dqn_torch.py Project: takeru1205/PER_PyTorch
    def __init__(self,
                 lr=0.003,
                 input_dims=[4],
                 env=None,
                 gamma=0.99,
                 n_actions=2,
                 epsilon_greedy_start=0.5,
                 epsilon_greedy_decay=0.0002,
                 max_size=1000000,
                 layer1_size=64,
                 layer2_size=64,
                 batch_size=128,
                 writer=None):
        self.env = env
        self.gamma = gamma
        self.memory = ReplayBuffer(max_size, input_dims, n_actions)
        self.batch_size = batch_size
        self.n_actions = n_actions
        self.epsilon_greedy_start = epsilon_greedy_start
        self.epsilon_greedy_decay = epsilon_greedy_decay

        self.net = Net(lr,
                       input_dims,
                       n_actions=n_actions,
                       fc1_dims=layer1_size,
                       fc2_dims=layer2_size,
                       name='dqn')
        self.target_net = deepcopy(self.net)
        self.target_net.load_state_dict(self.net.state_dict())
        self.target_net.eval()
        self.criterion = F.smooth_l1_loss
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.net.to(self.device)
        self.target_net.to(self.device)
        self.writer = writer
Code example #20
def train():
    datafile = DVCD('train', dataset_dir)  # instantiate the dataset
    # Wrap it in PyTorch's DataLoader for shuffling, multi-worker loading and batched reads.
    dataloader = DataLoader(datafile,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=workers,
                            drop_last=True)

    print('Dataset loaded! length of train set is {0}'.format(len(datafile)))

    model = Net()  # instantiate the network
    model = model.cuda()  # move the network to the GPU; remove ".cuda()" if no GPU is available
    model = nn.DataParallel(model)
    model.train()  # training mode (as opposed to .eval()); enables dropout, which helps prevent overfitting

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)  # Adam optimizer for adjusting the network parameters

    criterion = torch.nn.CrossEntropyLoss()  # cross-entropy loss: the closer output and label, the smaller the value

    cnt = 0  # number of images trained on
    for epoch in range(nepoch):
        # batch_size is set to 16, so each iteration of the DataLoader returns 16 images and 16 labels
        for img, label in dataloader:  # iterating the DataLoader calls the dataset's __getitem__() and batches the results
            img, label = Variable(img).cuda(), Variable(label).cuda()  # wrap in Variables and move to the GPU as the start of the graph
            out = model(img)  # forward pass: cat/dog scores for the batch (calls the network's forward())
            loss = criterion(out, label.squeeze())  # loss between output and labels; note the second argument must be a 1-D tensor
            loss.backward()  # backpropagation: compute the gradient of every parameter
            optimizer.step()  # let the optimizer adjust the parameters
            optimizer.zero_grad()  # clear the gradients before the next step; they accumulate by default
            cnt += 1

            print('Epoch:{0},Frame:{1}, train_loss {2}'.format(
                epoch, cnt * batch_size, loss / batch_size))  # print the result for one batch

    torch.save(model.state_dict(),
               '{0}/model.pth'.format(model_cp))  # after all training, save the network parameters
Code example #21
    def __init__(self):

        self.reader = im_reader(is_training=True)

        self.net = Net()

        self.rpn_loss_obj = RPN_loss(
            self.net.rpn_predictions['rpn_coord_bias'],
            self.net.rpn_predictions['rpn_bk_score'],
            self.net.rpn_predictions['rpn_targets'],
            self.net.rpn_predictions['rpn_bk_label'],
            self.net.rpn_predictions['rpn_index']
        )

        self.roi_loss_obj = ROI_loss(
            self.net.roi_predictions['reg_bias'],
            self.net.roi_predictions['cls_score'],
            self.net.roi_predictions['roi_targets'],
            self.net.roi_predictions['roi_cls_label'],
            self.net.roi_predictions['roi_inside_w']
        )

        self.rpn_bk_loss, self.rpn_reg_loss = self.rpn_loss_obj.add_loss()
        self.roi_cls_loss, self.roi_reg_loss = self.roi_loss_obj.add_loss()

        self.total_loss = self.rpn_bk_loss + self.rpn_reg_loss + \
            self.roi_cls_loss+self.roi_reg_loss

        self.loss_dict = {
            'rpn_bk_loss': self.rpn_bk_loss,
            'rpn_reg_loss': self.rpn_reg_loss,
            'roi_cls_loss': self.roi_cls_loss,
            'roi_reg_loss': self.roi_reg_loss,
            'total_loss': self.total_loss
        }

        if os.path.exists('result.txt'):
            with open('result.txt') as f:
                self.loss = eval(f.read())
        else:
            self.loss = []
Code example #22
File: main.py Project: hsouri/Cirlcle-Detection-CNN
def find_circle(img):
    model = Net()
    checkpoint = torch.load('model.pth.tar')
    model.load_state_dict(checkpoint)
    model.eval()

    with torch.no_grad():
        image = np.expand_dims(np.asarray(img), axis=0)
        image = torch.from_numpy(np.array(image, dtype=np.float32))
        normalize = transforms.Normalize(mean=[0.5], std=[0.5])
        image = normalize(image)
        image = image.unsqueeze(0)
        output = model(image)

    return [round(i) for i in (200 * output).tolist()[0]]
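A hedged usage sketch for find_circle(); it assumes the project's trained model.pth.tar is on disk and that inputs are 200x200 single-channel images (the 200x rescaling in the return statement suggests that size):

import numpy as np

img = np.random.rand(200, 200).astype(np.float32)  # stand-in for a real test image
params = find_circle(img)
print(params)  # rounded circle parameters (likely row, column and radius), rescaled by 200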
Code example #23
def main():
    args = parser.parse_args()
    print(args)

    print("=> creating model")
    model = Net()

    if args.resume:
        print("=> loading checkpoint: " + args.resume)
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint)
        args.start_epoch = int(args.resume.split('/')[1].split('_')[0])
        print("=> checkpoint loaded. epoch : " + str(args.start_epoch))

    else:
        print("=> Start from the scratch ")

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = nn.DataParallel(model)
    model.to(device)

    criterion1 = nn.MSELoss()
    criterion2 = nn.L1Loss()

    optimizer = optim.Adam(model.parameters(), args.lr)

    cudnn.benchmark = True
    normalize = transforms.Normalize(mean=[0.5], std=[0.5])

    trainset = NoisyImages(
        args.data,
        transforms.Compose([
            normalize,
        ]))

    train_loader = torch.utils.data.DataLoader(
        trainset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers)

    output = open(args.out_file, "w")
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, args)
        train(train_loader, model, criterion1, criterion2, optimizer, epoch, args, device, len(trainset), output)
Code example #24
def cvt_model(pickle_model, script_model):
    print("start convert")
    checkpoint = torch.load(pickle_model)
    
    net = Net()
    net.load_state_dict(checkpoint['model_state_dict'])
    #net.cuda()
    net.eval()  # must be called
    
    #example = torch.rand(1,3,32,32).cuda() 
    example = torch.rand(1,3,32,32).cpu()
    
    traced_script_module = torch.jit.trace(net, example)
    traced_script_module.save(script_model)
    
    print("convert complete")
Code example #25
def test():

    # setting model
    model = Net()  # instantiate the network
    model.cuda()  # move it to the GPU
    model = nn.DataParallel(model)
    model.load_state_dict(torch.load(model_file))  # load the trained model parameters
    model.eval()  # evaluation mode: disables dropout during inference

    # get data
    files = random.sample(os.listdir(dataset_dir), N)  # pick N test images at random
    imgs = []  # PIL images
    imgs_data = []  # tensor data
    for file in files:
        img = Image.open(dataset_dir + file)  # open the image
        img_data = getdata.dataTransform(img)  # convert it to a torch tensor

        imgs.append(img)  # image list
        imgs_data.append(img_data)  # tensor list
    imgs_data = torch.stack(imgs_data)  # stack the tensor list into one 4D tensor

    # calculation
    out = model(imgs_data)  # run the network on every image
    out = F.softmax(out, dim=1)  # turn the outputs into probabilities
    out = out.data.cpu().numpy()  # convert to numpy

    # print results
    for idx in range(N):
        plt.figure()
        if out[idx, 0] > out[idx, 1]:
            plt.suptitle('cat:{:.1%},dog:{:.1%}'.format(
                out[idx, 0], out[idx, 1]))
        else:
            plt.suptitle('dog:{:.1%},cat:{:.1%}'.format(
                out[idx, 1], out[idx, 0]))
        plt.imshow(imgs[idx])
    plt.show()
Code example #26
def main():
    if os.path.exists(JOB_NAME):
        raise AssertionError("Job name already exists")
    else:
        os.mkdir(JOB_NAME)
        f = open(os.path.join(JOB_NAME, "train_params.txt"), 'w')
        f.write("META_LEARNER " + str(META_LEARNER) + '\n')
        f.write("FUNCTION " + str(FUNCTION_TRAIN) + '\n')
        f.write("K_TRAIN " + str(K_TRAIN) + '\n')
        f.write("SGD_STEPS_TRAIN " + str(SGD_STEPS_TRAIN) + '\n')
        f.write("NOISE_PERCENT_TRAIN " + str(NOISE_PERCENT_TRAIN) + '\n')
        f.write("ITERATIONS_TRAIN " + str(ITERATIONS_TRAIN) + '\n')
        f.write("OUTER_LR_TRAIN " + str(OUTER_LR_TRAIN) + '\n')
        f.write("INNER_LR_TRAIN " + str(INNER_LR_TRAIN) + '\n')
        f.write("AVERAGER_SIZE_TRAIN " + str(AVERAGER_SIZE_TRAIN) + '\n')
        f.close()

    model = Net()
    if META_LEARNER == "reptile":
        learning_alg = Reptile(lr_inner=INNER_LR_TRAIN,
                               lr_outer=OUTER_LR_TRAIN,
                               sgd_steps_inner=SGD_STEPS_TRAIN)
    elif META_LEARNER == "maml":
        learning_alg = MAML(lr_inner=INNER_LR_TRAIN,
                            lr_outer=OUTER_LR_TRAIN,
                            sgd_steps_inner=SGD_STEPS_TRAIN)
    else:
        learning_alg = Insect(lr_inner=INNER_LR_TRAIN,
                              lr_outer=OUTER_LR_TRAIN,
                              sgd_steps_inner=SGD_STEPS_TRAIN,
                              averager=AVERAGER_SIZE_TRAIN)
    meta_train_data = DataGenerator(function=FUNCTION_TRAIN,
                                    size=ITERATIONS_TRAIN,
                                    K=K_TRAIN,
                                    noise_percent=NOISE_PERCENT_TRAIN)
    learning_alg.train(model, meta_train_data)

    torch.save(model, os.path.join(JOB_NAME, "trained_model.pth"))
    test(model)
Code example #27
    def train(self, model, train_data):
        optimizer = torch.optim.Adam(model.parameters(), lr=self.lr_outer)
        self.init_grad(model)

        for i, task in enumerate(train_data.shuffled_set()):
            inner_model = Net()

            for name, param in model.named_parameters():
                inner_model.set_attr(name, param)

            for _ in range(self.sgd_steps_inner):
                self.inner_train(inner_model, task)
                for name, param in inner_model.named_parameters():
                    inner_model.set_attr(name, torch.nn.Parameter(param - self.lr_inner * param.grad))

            x, y = task.mini_test_set()
            predicted = inner_model(x)
            loss = F.mse_loss(predicted, y)
            loss.backward(retain_graph=True)
            optimizer.step()
            optimizer.zero_grad()
Code example #28
def inference(time_series_data, start_date, model_path='./model_best.pth', save_output=False):
    data_channels = 2
    input_time_interval = 365
    output_time_interval = 7
    
    predict_date_start = start_date
    predict_date_end = predict_date_start + datetime.timedelta(days=output_time_interval - 1)
    input_date_start = predict_date_start - datetime.timedelta(days=input_time_interval)
    input_date_end = predict_date_start - datetime.timedelta(days=1)
    
    input = time_series_data[input_date_start:input_date_end].values.transpose(1, 0)
    input = input.reshape((1, data_channels, input_time_interval)).astype(np.float)
    
    target = time_series_data[predict_date_start:predict_date_end]['peak_load'].values
    
    net = Net(in_ch=data_channels, out_ch=output_time_interval)
    
    checkpoint = torch.load(model_path)
    net.load_state_dict(checkpoint['net'])
    
    net.eval()
    torch.set_grad_enabled(False)
    
    input = torch.tensor(input, dtype=torch.float)
    output = net(input).detach().numpy()
    
    print('input: {} ~ {}, output: {} ~ {}'.format(input_date_start.isoformat(), input_date_end.isoformat(),
                                                   predict_date_start.isoformat(), predict_date_end.isoformat()))
    print('output: ', list(map('{:.0f}'.format, output[0])))
    
    if len(target) == output_time_interval:
        target = target.reshape((1, output_time_interval)).astype(np.float)
        score = rmse(target, output)
        print('target: ', list(map('{:.0f}'.format, target[0])))
        print('RMSE:   {}'.format(score))

    if save_output:
        date_list = [(start_date + datetime.timedelta(days=x)).strftime('%Y%m%d') for x in range(0, 7)]
        df_dict = {
            'date': date_list,
            'peak_load(MW)': np.around(output[0]).astype(np.int)
        }
    
        df = pd.DataFrame(df_dict)
        df.to_csv('submission.csv', encoding='UTF-8', index=0)
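A hypothetical call to inference(); it assumes the project's model_best.pth checkpoint exists and that time_series_data is a date-indexed DataFrame with exactly two columns (peak_load plus one other feature; the names and values below are made up) covering the 365 days before the prediction window:

import datetime
import numpy as np
import pandas as pd

dates = pd.date_range('2018-01-01', '2019-01-31', freq='D')
ts = pd.DataFrame({'peak_load': np.random.rand(len(dates)) * 30000,
                   'temperature': np.random.rand(len(dates)) * 35},
                  index=dates)
inference(ts, datetime.date(2019, 1, 1))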
Code example #29
def load_gen():
    """
    Check the save path for the oldest generation, then load all available nets
    """
    save_path = os.path.join(os.getcwd(), "SSwingEnv")
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    try:
        oldest_gen = max([
            int(x.split(" ")[1].strip(".pkljg")) for x in os.listdir(save_path)
        ])
    except ValueError:
        oldest_gen = 0
    oldest_str = "Generation " + str(oldest_gen) + ".pkl"
    gen_path = os.path.join(save_path, oldest_str)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    if oldest_gen != 0:
        nets = []
        file = open(gen_path, "rb")
        while True:
            try:
                nets.append(pickle.load(file))
            except EOFError:
                break
        file.close()
        nets = np.array(nets)
        try:
            nets = np.random.choice(nets, settings.number_nets, False)
        except:
            print("Insufficient saved nets. Taking Random Sample")
            nets = np.random.choice(nets, settings.number_nets, True)
    else:
        nets = np.array([Net() for _ in range(settings.number_nets)])
    return nets, oldest_gen, save_path
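load_gen() reads one pickled net after another from a single "Generation N.pkl" file. A hypothetical writer that would produce files in that format (illustrative only, not the project's code):

import os
import pickle

def save_gen(nets, generation, save_path):
    # append-pickle every net into one 'Generation N.pkl' file, matching what load_gen() expects
    with open(os.path.join(save_path, "Generation {}.pkl".format(generation)), "wb") as f:
        for net in nets:
            pickle.dump(net, f)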
Code example #30
 def __init__(self,
              agent_config,
              net_config,
              checkpoint_dir,
              tensorboard_log_dir,
              render=False,
              restore=False):
     super().__init__(agent_config['name'], agent_config['gamma'], render)
     self.net = Net(net_config)
     self.net.build()
     if restore:
         latest_checkpoint = get_latest_checkpoint(
             os.path.join(checkpoint_dir, "checkpoint"))
         self.net.restore(
             os.path.join(checkpoint_dir,
                          "checkpoint_{}".format(latest_checkpoint)))
         # saver to save (and later restore) model checkpoints
         self.net.saver = tf.train.Saver(max_to_keep=500)
     else:
         self.net.saver = tf.train.Saver(max_to_keep=500)
         self.net.initialize(checkpoint_dir)
     self.net.summary_writer = tf.summary.FileWriter(
         tensorboard_log_dir, self.net.sess.graph)
     self.state_channels = net_config["state_channels"]
Code example #31
def main():
    model = Net()
    model.load_state_dict(torch.load("perfume_cnn.pt"))
    model.eval()

    summary(model, (3, 64, 64))

    path_list = glob.glob("who_is_this_member/*")
    print("path_list: {}".format(path_list))

    out_dir = "./her_name_is"
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    for path in path_list:
        image_name = path.split("/")[-1]
        who = detect_who(path, model)
        save_path = os.path.join(out_dir + "/" + image_name)
        cv2.imwrite(save_path, who)
Code example #32
File: ledctrl.py Project: jowlo/esp8266-pwm
class LED_Controller:
    def __init__(self, strips, udp_address, udp_port):
        self.strips = strips
        self.network = Net(udp_address, udp_port)
        self.fft = None
        self.state_factory = State(strips)
        self.color = Color()
        self.groups = [[a] for a in list(range(strips))]

    def full_color(self, color):
        """Send a color to all strips."""
        self.network.send(self.state_factory.full_color(color))

    def rainbow_full_state(self, state, i, freq=.03):
        """Return a state with all strips in single color of rainbow, i is used as counter."""
        return self.full_color([math.sin(freq * i + offset) * 0.5 + 0.5 for offset in range(0, 6, 2)])

    def rainbow_full(self):
        """Directly display rainbow fade with all strips in single color."""
        colors = Color.rainbow_colors()
        for color in colors:
            self.network.send(self.state_factory.full_color(color))
            time.sleep(.03)

    def rainbow_moving(self, groups=None, freq=0.5):
        """Display rainbow colors using strip-groups, colors are moved."""
        if groups is None:
            groups = self.groups
        colors = Color.rainbow_colors(2000, freq)
        for i in range(len(colors)):
            state = self.state_factory.state_off()
            for g in range(len(groups)):
                state = self.state_factory.set_strips(state, groups[g], colors[i + g])
            self.network.send(state)
            time.sleep(.03)

    def rainbow_moving_state(self, groups=None, freq=0.5):
        """Yield states of rainbow colors moving through strip groups; an internal counter drives the motion."""
        if groups is None:
            groups = self.groups
        colors = Color.rainbow_colors(2000, freq)
        i = 0
        state = self.state_factory.state_off()
        while True:
            i = (i + 1) % (len(colors) - len(groups))
            for g in range(len(groups)):
                state = self.state_factory.set_strips(state, groups[g], colors[i + g])
            yield state

    def sine_map_state(self, state, i, groups=GROUPS, freq=0.5):
        """Use a sine-function to set the brightness of strip groups of a given state, cycles with i."""
        a_map = [math.sin(freq * i + offset) * 0.5 + 0.5 for offset in range(len(groups))]
        for g in range(len(groups)):
            state = self.state_factory.set_strips(state, groups[g], [a_map[g] * c for c in state[groups[g][0]]])
        return state

    def move_color_state(self, color, groups=GROUPS, base=None):
        """Yield states with one color moving through strips."""
        if base is None:
            base = self.state_factory.state_off()
        i = 0
        while True:
            i = (i + 1) % len(groups)
            yield self.state_factory.set_strips(base, groups[i], color)

    def alpha_state(self, state, i, freq=0.3):
        """Return a state in which all strips brightness is set, cycling with i."""
        for s in range(self.strips):
            state = self.state_factory.set_strips(state, [s], Color.alpha(state[s], math.sin(freq * i) * 0.5 + 0.5))
            if DEBUG:
                print(state)
        return state

    def alpha_pulse_colors(self, color, freq=0.5):
        """Return a list colors where color is the base and alpha is cycled."""
        colors = []
        for i in range(2000):
            colors.append([(math.sin(freq * i) * 0.5 + 0.5) * c for c in color])
        return colors

    def pulse_color(self, color, freq=0.5):
        """Display alpha-pulsing color."""
        colors = self.alpha_pulse_colors(color, freq)
        for color in colors:
            self.full_color(color)
            time.sleep(.03)

    def led_funct(self, base, steps, *functions):
        """
        Yield steps-many states based on base, applying functions.

        The supplied functions have to take a state and a counter variable as
        parameters.  Functions are applied in given order.

        """
        for i in range(steps):
            state = base[:]
            for f in functions:
                state = f(state, i)
            yield state


    def iterate_states(self, states, delay=0.03):
        """Display list of states one after the other."""
        for state in states:
            if DEBUG:
                print(state)
            self.network.send(state)
            time.sleep(delay)

    def iterate_states_threaded(self, generator, delay=0.03):
        self.network.generator = generator
        if not self.network.run_thread:
            self.network.start_sender_thread()

    def iterate_generator(self, generator):
        self.network.generator = generator

    def numbering(self):
        """Cycle through strips to find out numbering."""
        for i in range(self.strips):
            state = self.state_factory.state_off()
            print(i)
            self.network.send(self.state_factory.set_strips(state, [i], [1, 1, 1]))
            time.sleep(1)

    def fft_init(self, delay=0.03):
        """
        Initialize a thread that starts FFT-Analysation.

        If a thread has already been started and FFT is running, no
        new thread is started.
        """
        if not self.fft.run_thread:
            self.fft.start_analyse_thread()
        time.sleep(delay)

    def fft_destroy(self):
        """Stop FFT-thread."""
        self.fft.stop_analyse_thread()

    def fft_eq(self, colors=None, scale=1, delay=0.03, threshold=0, groups=GROUPS):
        if colors is None:
            colors = self.color.heat_colors()
        self.fft_init()
        state = self.state_factory.state_off()
        while True:
            intensity = [((scale * i) if i > threshold else 0) for i in self.fft.intensity()]
            if DEBUG:
                print(intensity)
            for i in range(len(groups)):
                if i < len(intensity):
                    self.state_factory.set_strips(state, GROUPS[i], colors[np.clip(intensity[i], 0, 99)])
            yield state

    def fft_pulse_map(self, colors=None, scale=1, delay=0.03, threshold=0, channel=0):
        if colors is None:
            colors = self.color.heat_colors()
        self.fft_init()
        state = self.state_factory.state_off()
        while True:
            intensity = [((scale * i) if i > threshold else 0) for i in self.fft.intensity()]
            if DEBUG:
                print(intensity)
            state = self.state_factory.full_color(colors[np.clip(intensity[channel], 0, len(colors) - 1)])
            yield state

    def fft_pulse_color(self, color=Color.white, scale=1, delay=0.03, channel=0, threshold=0):
        self.fft_init()
        state = self.state_factory.state_off()
        while True:
            intensity = [((scale * i) if i > threshold else 0) for i in self.fft.intensity()]
            if DEBUG:
                print(intensity)
            state = self.state_factory.full_color(Color.alpha(color, intensity[channel] / 100))
            yield state

    def fft_pulse_cycle(self, cycle_colors=None, scale=1, delay=0.03, threshold=0, channel=0):
        if cycle_colors is None:
            cycle_colors = self.color.rainbow_colors()
        self.fft_init()
        cycle = 1
        state = self.state_factory.state_off()
        while True:
            cycle = (cycle + 1) % len(cycle_colors)
            intensity = [((scale * i) if i > threshold else 0) for i in self.fft.intensity()]
            if DEBUG:
                print(intensity)
            state = self.state_factory.full_color(Color.alpha(cycle_colors[cycle], intensity[channel] / 100))
            yield state

    def fft_move_color(self, color=None, groups=GROUPS, channel=0, decay=0.8, scale=1, delay=0.03, threshold=0):
        self.fft_init()
        state = self.state_factory.state_off()
        while True:
            c = color()
            intensity = [((scale * i) if i > threshold else 0) for i in self.fft.intensity()]
            if DEBUG:
                print(intensity)
            nextstate = self.state_factory.state_off()
            for i in range(1, len(groups)):
                self.state_factory.set_strips(nextstate, groups[i], self.color.alpha(state[groups[i - 1][0]], decay))
            self.state_factory.set_strips(nextstate, groups[0], self.color.alpha(c, intensity[channel] / 100))
            state = nextstate[:]
            yield state

    def fft_move_map(self, colors=None, groups=GROUPS, channel=0, scale=1, delay=0.03, decay=1, threshold=0):
        if colors is None:
            colors = self.color.heat_colors()
        self.fft_init()
        state = self.state_factory.state_off()
        while True:
            intensity = [((scale * i) if i > threshold else 0) for i in self.fft.intensity()]
            if DEBUG:
                print(intensity)
            nextstate = self.state_factory.state_off()
            for i in range(1, len(groups)):
                self.state_factory.set_strips(nextstate, groups[i], self.color.alpha(state[groups[i - 1][0]], decay))
            self.state_factory.set_strips(nextstate, groups[0], colors[int(np.clip(intensity[channel], 0, 99))])
            state = nextstate[:]
            yield state

    def off(self):
        self.network.stop_sender_thread()
        self.network.send(self.state_factory.state_off())

    def stop_all_threads(self):
        self.network.stop_sender_thread()
        self.network.send(self.state_factory.state_off())
        if self.fft:
            self.fft.stop_analyse_thread()

    def testrgb(self, delay):
        for c in [Color.red, Color.green, Color.blue, Color.white]:
            self.full_color(c)
            time.sleep(delay)
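A short, hedged usage sketch for LED_Controller; the strip count, IP address and port are invented, and Net, State and Color are the project's own helpers:

# Hypothetical usage (values are made up).
ctrl = LED_Controller(strips=10, udp_address='192.168.0.42', udp_port=7777)
ctrl.testrgb(0.5)                 # cycle red, green, blue and white on all strips
ctrl.full_color([1.0, 0.5, 0.0])  # set every strip to a single color
ctrl.off()                        # stop the sender thread and blank the strips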