def __init__(self):
     self.cap = cv2.VideoCapture(0)
     fourcc = cv2.VideoWriter_fourcc(*'XVID')
     self.out = cv2.VideoWriter('G3_01123.avi', fourcc, 20.0, (640, 480))
     self.hand_mask = []
     self.trigger = False
     self.after_trigger = False
     if torch.cuda.is_available():
         self.net = Net().cuda()
     else:
         self.net = Net()
     self.net.load_state_dict(
         torch.load(
             f='/home/intuitivecompting/catkin_ws/src/ur5/ur5_with_gripper/icl_phri_robotiq_control/src/model'
         ))
     self.last_select = None
     self.tip_deque = deque(maxlen=20)
     self.tip_deque1 = deque(maxlen=20)
     self.tip_deque2 = deque(maxlen=20)
     self.mode = None
     self.center = None
     self.onehand_center = None
     self.two_hand_mode = None
     self.pick_center = None
     self.gesture_mode = None
     self.pick_tip = None
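
If the checkpoint was written on a GPU machine, the CPU branch above would fail inside torch.load; a device-agnostic sketch of the same load (MODEL_PATH is a stand-in for the long path above, and map_location is a standard torch.load argument):

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = Net().to(device)
# map_location remaps GPU-saved tensors onto whatever device is available
net.load_state_dict(torch.load(MODEL_PATH, map_location=device))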
Example #2
def load_files(tasks_nb, datafile_name):
    with open(datafile_name, 'rb') as input:
        data_saved_data = pickle.load(input)
    train_datasets, test_datasets = get_datasets(task_number=tasks_nb,
                                                 batch_size_train=128,
                                                 batch_size_test=4096,
                                                 saved_data=data_saved_data)

    kfacs = []
    all_models = {}

    for i in range(tasks_nb):
        model_name = '{:d}-0'.format(i)
        model = Net().cuda()
        model.load_state_dict(torch.load('models/{:s}.pt'.format(model_name)))
        all_models[model_name] = model

        with open('kfacs/{:d}_weights.pkl'.format(i), 'rb') as input:
            weights = pickle.load(input)
        with open('kfacs/{:d}_maa.pkl'.format(i), 'rb') as input:
            m_aa = pickle.load(input)
        with open('kfacs/{:d}_mgg.pkl'.format(i), 'rb') as input:
            m_gg = pickle.load(input)
        
        kfac = KFAC(model, train_datasets[i], False)
        kfac.weights = weights
        kfac.m_aa = m_aa
        kfac.m_gg = m_gg
        kfacs.append([kfac])

    return train_datasets, test_datasets, kfacs, all_models
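
A hedged usage sketch of the loader above (the pickle name is hypothetical; it has to match whatever get_datasets expects as saved_data):

train_datasets, test_datasets, kfacs, all_models = load_files(
    tasks_nb=5, datafile_name='saved_data.pkl')
print(len(kfacs))  # one single-model KFAC list per task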
Example #3
    def __init__(self,
                 memory_size=50000,
                 batch_size=128,
                 gamma=0.99,
                 lr=1e-3,
                 n_step=500000):
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.gamma = gamma

        # memory
        self.memory_size = memory_size
        self.Memory = ReplayMemory(self.memory_size)
        self.batch_size = batch_size

        # network
        self.target_net = Net().to(self.device)
        self.eval_net = Net().to(self.device)
        self.target_update()  # initialize same weight
        self.target_net.eval()

        # optim
        self.optimizer = optim.Adam(self.eval_net.parameters(), lr=lr)
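
target_update() is called above but not shown; a minimal sketch, assuming the usual DQN hard update that copies the online network's weights into the target network:

    def target_update(self):
        # hard update: overwrite the target network with the eval-net weights
        self.target_net.load_state_dict(self.eval_net.state_dict())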
Example #4
if validindices is not None:  # assumed guard; the original snippet begins mid-block
    validloader = torch.utils.data.DataLoader(
        DogCat_dataset_train,
        batch_size=batchSize,
        sampler=torch.utils.data.SubsetRandomSampler(validindices),
        num_workers=0)
else:
    validloader = None

testloader = torch.utils.data.DataLoader(DogCat_dataset_test,
                                         batch_size=batchSize,
                                         shuffle=False,
                                         num_workers=0)

classes = ('normal', 'TB')

net = Net()  #network1 calling, comment this line to test other network.
#net = Net1() #network2 calling, un-comment this line to test network2.
#net = NewNet() #network3 calling, un-comment this line to test network3.
net.to(device)

#loss and optimiser defining
import torch.optim as optim

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(),
                       lr=LR)  # changes in learning rate can be made here.
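
If a decaying learning rate is wanted instead of a fixed one, a standard scheduler can be attached to this optimizer; a sketch with placeholder step_size and gamma values:

scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
# call scheduler.step() once per epoch, after the epoch's optimizer updates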


#validation function definition
def val(epoch):
    net.eval()
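    # (the original snippet is cut off here; a hedged sketch of how such a
    #  validation pass typically continues, using the objects defined above)
    correct, total = 0, 0
    with torch.no_grad():  # no gradients are needed during evaluation
        for inputs, labels in validloader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = net(inputs)
            correct += (outputs.argmax(dim=1) == labels).sum().item()
            total += labels.size(0)
    print('epoch {}: validation accuracy {:.4f}'.format(epoch, correct / total))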
Example #5
 def __init__(self, args):
     self.file_path = os.path.dirname(
         os.path.abspath(__file__))  # current file path
     self.use_cuda = args.use_cuda
     self.scale = args.scale
     self.dir = args.dir
     self.grasp_angle = args.grasp_angle
     self.voxel_size = args.voxel_size
     self.color_topic = args.color_topic
     self.depth_topic = args.depth_topic
     self.episode = args.episode
     self.run = args.run
     self.num_objs = args.num_objs
     self.save_root = self.file_path + "/exp_2/{}/ep{}_run{}".format(
         self.dir, self.episode, self.run)
     self._create_directories()
     self.suck_weight = 1.0
     self.grasp_weight = 0.25
     self.count = 0
     self.last_iter_fail = None
     self.last_fail_primitive = None
     self.gripper_angle_list = [0, -45, -90, 45]  # 0 1 2 3
     self.bridge = CvBridge()
     self.background_color = self.file_path + "/" + args.color_bg
     self.background_depth = self.file_path + "/" + args.depth_bg
     self.action_wrapper = ActionWrapper()
     # Service
     self.service = rospy.Service(
         "~start", Empty,
         self.callback)  # Start process, until workspace is empty
     self.save_background = rospy.Service(
         "~save_bg", Empty,
         self.save_cb)  # Save bin background color and depth image
     self.reset_service = rospy.Service(
         "~reset", Empty, self.reset_cb
     )  # Reset `self.episode`, `self.run` and create new save root
     # Service client
     self.record_bag_client = rospy.ServiceProxy(
         "/autonomous_recording_node/start_recording", recorder)
     self.stop_record_client = rospy.ServiceProxy(
         "/autonomous_recording_node/stop_recording", Empty)
     try:
         self.camera_info = rospy.wait_for_message(self.color_topic.replace(
             "image_raw", "camera_info"),
                                                   CameraInfo,
                                                   timeout=5.0)
     except rospy.ROSException:
         rospy.logerr(
             "Can't get camera intrinsic after 5 seconds, terminate node..."
         )
         rospy.signal_shutdown("No intrinsic")
         sys.exit(0)
     load_ts = time.time()
     rospy.loginfo("Loading model...")
     self.suck_net = Net(args.n_classes)
     self.grasp_net = Net(args.n_classes)
     self.suck_net.load_state_dict(
         torch.load(self.file_path + "/" + args.suck_model))
     self.grasp_net.load_state_dict(
         torch.load(self.file_path + "/" + args.grasp_model))
     if self.use_cuda:
         self.suck_net = self.suck_net.cuda()
         self.grasp_net = self.grasp_net.cuda()
     rospy.loginfo("Load complete, time elasped: {}".format(time.time() -
                                                            load_ts))
     rospy.loginfo("current episode: \t{}".format(self.episode))
     rospy.loginfo("current run: \t{}".format(self.run))
     rospy.loginfo("current code: \t{}".format(
         self.encode_index(self.episode, self.run)))
     rospy.loginfo("Service ready")
Example #6
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument(
        '--test-batch-size',
        type=int,
        default=100,
        metavar='N',
        help='input batch size for testing (default: %(default)s)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: %(default)s)')
    parser.add_argument('--dataset',
                        choices=['mnist', 'fashion-mnist'],
                        default='mnist',
                        metavar='D',
                        help='mnist/fashion-mnist (default: %(default)s)')
    parser.add_argument('--nonlin',
                        choices=['softplus', 'sigmoid', 'tanh'],
                        default='softplus',
                        metavar='D',
                        help='softplus/sigmoid/tanh (default: %(default)s)')
    parser.add_argument('--num-layers',
                        type=int,
                        choices=[2, 3, 4],
                        default=2,
                        metavar='N',
    parser.add_argument('--epsilon',
                        type=float,
                        default=1.58,
                        metavar='E',
                        help='ball radius (default: %(default)s)')
    parser.add_argument('--test-epsilon',
                        type=float,
                        default=1.58,
                        metavar='E',
                        help='ball radius (default: %(default)s)')
    parser.add_argument(
        '--step-size',
        type=float,
        default=0.005,
        metavar='L',
        help='step size for finding adversarial example (default: %(default)s)'
    )
    parser.add_argument(
        '--num-steps',
        type=int,
        default=200,
        metavar='L',
        help=
        'number of steps for finding adversarial example (default: %(default)s)'
    )
    parser.add_argument(
        '--beta',
        type=float,
        default=0.005,
        metavar='L',
        help='regularization coefficient for Lipschitz (default: %(default)s)')

    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    if args.dataset == 'mnist':
        dataset = datasets.MNIST
    elif args.dataset == 'fashion-mnist':
        dataset = datasets.FashionMNIST
    else:
        raise ValueError('Unknown dataset %s' % args.dataset)

    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    test_loader = torch.utils.data.DataLoader(dataset(
        './' + args.dataset,
        train=False,
        transform=transforms.Compose([transforms.ToTensor()])),
                                              batch_size=args.test_batch_size,
                                              shuffle=False,
                                              **kwargs)

    model = Net(args.num_layers, args.nonlin).to(device)
    model_name = 'saved_models/' + args.dataset + '_' + str(
        args.num_layers) + '_' + args.nonlin + '_L2_' + str(
            args.epsilon) + '_EIGEN_' + str(args.beta)
    model.load_state_dict(torch.load(model_name))

    print(args)
    print(model_name)

    acc, empirical_acc = test_standard_adv(args, model, device, test_loader)
    certified_acc = test_cert(args, model, device, test_loader)

    print('Accuracy: {:.4f}, Empirical Robust Accuracy: {:.4f}, '
          'Certified Robust Accuracy: {:.4f}\n'.format(acc, empirical_acc,
                                                       certified_acc))
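
With the defaults above, model_name resolves to 'saved_models/mnist_2_softplus_L2_1.58_EIGEN_0.005'. A quick existence check that could sit just before load_state_dict, as a sketch:

    import os
    assert os.path.isfile(model_name), 'missing checkpoint: ' + model_name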
Example #7
    def __init__(self, start):
        self.cap = cv2.VideoCapture(0)
        self.start_time = start

        self.stored_flag = False
        self.trained_flag = False
        self.milstone_flag = False
        self.incremental_train_flag = False
        self.tracking_flag = False

        self.boxls = None
        self.count = 1
        self.new_count = 1
        self.path = "/home/intuitivecompting/Desktop/color/Smart-Projector/script/datasets/"
        if MODE == 'all':
            self.file = open(self.path + "read.txt", "w")
            self.milestone_file = open(self.path + "mileston_read.txt", "w")
        self.user_input = 0
        self.predict = None
        self.memory = cache(10)
        self.memory1 = cache(10)
        self.hand_memory = cache(10)

        self.node_sequence = []
        #-----------------------create GUI-----------------------#
        self.gui_img = np.zeros((130, 640, 3), np.uint8)
        cv2.circle(self.gui_img, (160, 50), 30, (255, 0, 0), -1)
        cv2.putText(self.gui_img, "start", (130, 110), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0))
        cv2.circle(self.gui_img, (320, 50), 30, (0, 255, 0), -1)
        cv2.putText(self.gui_img, "stop", (290, 110), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0))
        cv2.circle(self.gui_img, (480, 50), 30, (0, 0, 255), -1)
        cv2.putText(self.gui_img, "quit", (450, 110), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))
        cv2.namedWindow('gui_img')
        cv2.namedWindow('gui_img1')
        cv2.setMouseCallback('gui_img', self.gui_callback)
        cv2.setMouseCallback('gui_img1', self.gui_callback)
        #-----------------------Training sign--------------#
        self.training_surface = np.ones((610, 640, 3), np.uint8) * 255
        cv2.putText(self.training_surface, 'Training...', (120, 300), cv2.FONT_HERSHEY_SIMPLEX, 3.0, (255, 192, 203), 5)
        #----------------------new coming item id------------------#
        self.new_come_id = None
        self.old_come_id = None
        self.new_come_side = None
        self.old_come_side = None
        self.new_coming_lock = True
        self.once_lock = True
        #---------------------set some flag-------------------#
        self.storing = None
        self.quit = None
        self.once = True
        #---------------------set gui image----------------------#
        self.temp_surface = None
        #----------------------for easier development-----------------#
        if MODE == 'test':
            if not GPU:
                self.net = Net()
            else:
                self.net = Net().cuda()
            self.net.load_state_dict(torch.load(f=self.path + 'model'))
            self.user_input = 5
            self.stored_flag = True
Example #8
    def train(self, is_incremental=False):
        if is_incremental:
            # persist the accumulated node pairs before retraining
            with open("node.p", "wb") as f:
                pickle.dump(node.pair_list, f)
        start_time = time.time()
        # the scratch and incremental paths build the same reader and a fresh
        # network; the warm-start below is currently disabled
        reader_train = self.reader(self.path, "read.txt")
        if not GPU:
            self.net = Net()
        else:
            self.net = Net().cuda()
        # if is_incremental:
        #     self.net.load_state_dict(torch.load(f=self.path + 'model'))
        optimizer = optim.SGD(self.net.parameters(), lr=LR, momentum=MOMENTUM, nesterov=True)
        # optimizer = optim.Adam(self.net.parameters(), lr=LR, weight_decay=0.01)
        schedule = optim.lr_scheduler.StepLR(optimizer, step_size=STEP, gamma=GAMMA)
        trainset = CovnetDataset(reader=reader_train,
                                 transforms=transforms.Compose([transforms.Resize((200, 100)),
                                                                transforms.ToTensor()]))
        # trainset = CovnetDataset(reader=reader_train,
        #                          transforms=transforms.Compose([transforms.Pad(30),
        #                                                         transforms.ToTensor()]))
        trainloader = DataLoader(dataset=trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
#-----------------------------------training----------------------------------------------------------------        
        if True:
            loss_ls = []
            count = 0
            count_ls = []
            t = tqdm.trange(EPOTH, desc='Training')
            temp = 0
            for _ in t:  # loop over the dataset multiple times
                running_loss = 0.0
                i = 0
                for data in trainloader:

                    # get the inputs
                    inputs, labels = data
                    if GPU:
                        inputs, labels = inputs.cuda(), labels.cuda()
                    inputs, labels = Variable(inputs), Variable(labels.long())
                    # zero the parameter gradients
                    optimizer.zero_grad()
                    # forward + backward + optimize
                    outputs = self.net(inputs)
                    # print(outputs)
                    # print(labels.view(1, -1)[0])
                    loss = F.cross_entropy(outputs, labels.view(1, -1)[0])
                    loss.backward()
                    optimizer.step()
                    t.set_description('loss=%g' % temp)

                    loss_ls.append(loss.item())
                    count += 1
                    count_ls.append(count)
                    
                    running_loss += loss.item()                    
                    if i % 10 == 9:   
                        temp = running_loss/10
                        running_loss = 0.0
                    i += 1
                # step the LR schedule once per epoch, after the optimizer updates
                schedule.step()
            plt.plot(count_ls, loss_ls)
            plt.show(block=False)
            print('Finished Training, took {} seconds'.format(int(time.time() - start_time)))
            
            self.quit = None
            
            if not is_incremental:
                self.user_input = 5
                torch.save(self.net.state_dict(), f=self.path + 'model')
            else:
                torch.save(self.net.state_dict(), f=self.path + 'milestone_model')
                # try:
                #     node_file = open(self.path + "node.txt", "w")
                #     for pair in node.pair_list: 
                #         node_file.write(str(pair[0][0]) + "" + str(pair[0][1]) + "" +str(pair[1][0]) + "" + str(pair[1][1]) + "\n")
                # except:
                #     print("fail to save")
            return True
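
The commented-out load in the incremental branch above hints at warm-starting; a sketch of what enabling it would look like, placed before the optimizer is built (paths as used elsewhere in this class):

        if is_incremental:
            # resume from the weights saved by the previous full training run
            self.net.load_state_dict(torch.load(self.path + 'model'))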
Example #9
def main():
    EPOCHS = 10
    tasks_nb = 50
    models_nb_per_task = 1
    multi_task_dataset = False
    use_kfac = True
    accumulate_last_kfac = False
    ewc = False
    lmbd = 10**4
    seed = 1234
    dataset_name = 'pMNIST'

    save_models = False

    set_seed(seed)
    train_datasets, test_datasets = get_datasets(
        dataset_name=dataset_name,
        task_number=tasks_nb,
        batch_size_train=128,
        batch_size_test=4096,
        include_prev=multi_task_dataset,
        seed=seed)

    all_models = {}
    models = [Net().cuda() for _ in range(models_nb_per_task)]
    optimizers = [
        optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
        for model in models
    ]

    kfacs = []
    train_criterion = [
        create_loss_function(kfacs, model, accumulate_last_kfac, lmbd,
                             use_kfac) for model in models
    ]
    test_criterion = torch.nn.CrossEntropyLoss()
    val_accs = [[0.0] * tasks_nb for _ in range(tasks_nb)]

    for task_id in range(tasks_nb):
        task_kfacs = []

        for model_id, model in enumerate(models):
            print('Task {} Model {}:'.format(task_id + 1, model_id + 1))

            for epoch in range(1, EPOCHS + 1):
                train(model, train_datasets[task_id], optimizers[model_id],
                      train_criterion[model_id], epoch, task_id + 1)
                all_models['{:d}-{:d}'.format(task_id,
                                              model_id)] = deepcopy(model)

            for test_task_id in range(tasks_nb):
                print('Test model {} on task {}'.format(
                    model_id + 1, test_task_id + 1),
                      flush=True)
                val_acc = validate(model, test_datasets[test_task_id],
                                   test_criterion)[0].avg.item()

                prev_acc = val_accs[task_id][test_task_id] * model_id
                val_accs[task_id][test_task_id] = (prev_acc +
                                                   val_acc) / (model_id + 1)

            task_kfacs.append(KFAC(model, train_datasets[task_id], ewc))
            task_kfacs[-1].update_stats()

        kfacs.append(task_kfacs)

        if accumulate_last_kfac and len(kfacs) > 1:
            for model_kfac_id in range(len(kfacs[-1])):
                for module_id in range(len(kfacs[-1][model_kfac_id].modules)):
                    kfacs[-1][model_kfac_id].m_aa[module_id] += kfacs[-2][
                        model_kfac_id].m_aa[module_id]
                    kfacs[-1][model_kfac_id].m_gg[module_id] += kfacs[-2][
                        model_kfac_id].m_gg[module_id]

        # kfacs[-1][-1].visualize_attr('images/', task_id, 'gg')
        # kfacs[-1][-1].visualize_attr('images/', task_id, 'aa')

        print(
            '#' * 60, 'Avg acc: {:.2f}'.format(
                np.sum(val_accs[task_id][:task_id + 1]) / (task_id + 1)))

    if save_models:
        for i in range(len(kfacs)):
            kfac = kfacs[i][-1]
            with open('kfacs/{:d}_weights.pkl'.format(i), 'wb') as output:
                pickle.dump(kfac.weights, output, pickle.HIGHEST_PROTOCOL)
            with open('kfacs/{:d}_maa.pkl'.format(i), 'wb') as output:
                pickle.dump(kfac.m_aa, output, pickle.HIGHEST_PROTOCOL)
            with open('kfacs/{:d}_mgg.pkl'.format(i), 'wb') as output:
                pickle.dump(kfac.m_gg, output, pickle.HIGHEST_PROTOCOL)

        for model_name, model in all_models.items():
            torch.save(model.state_dict(), 'models/{:s}.pt'.format(model_name))
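
These pickles and checkpoints are exactly what load_files near the top of this page reads back; a sketch of the round trip (the datafile_name pickle is a separate, assumed file):

    train_ds, test_ds, kfacs, models = load_files(tasks_nb, 'saved_data.pkl')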
Example #10
test_size = len(full_dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(
    full_dataset, [train_size, test_size])

train_loader = DataLoader(train_dataset,
                          batch_size=1,
                          num_workers=8,
                          shuffle=True,
                          drop_last=False)
test_loader = DataLoader(test_dataset,
                         batch_size=1,
                         num_workers=8,
                         shuffle=True,
                         drop_last=False)

encoders = nn.ModuleList([Net(dims=[5, k, k, k])])
decoders = nn.ModuleList([Net(dims=[k + 2, k, k, 1])])
loss_fn = nn.MSELoss()
if model_type == 'NP':
    model = NeuralProcesses(encoders, decoders)
    # NP ignores the meshes, so both names share one placeholder list
    mesh_list = mesh_params = [[None] for _ in range(len(full_dataset))]
else:
    assert min(sqrt_num_nodes_list) >= 1
    if model_type == 'GENSoftNN':
        model = GENSoftNN(encoders=encoders, decoders=decoders)
    elif model_type == 'GENPlanarGrid':
        model = GENPlanarGrid(encoders=encoders, decoders=decoders)
    else:
        raise NotImplementedError
    mesh_list, mesh_params = create_mesh_list(
        num_datasets=len(full_dataset),
        # remaining arguments mirror the identical call further down the page
        sqrt_num_nodes_list=sqrt_num_nodes_list,
        initialization='random' if opt_nodes else 'uniform',
        copies_per_graph=copies_per_graph, device=device)
Example #11
                                    shuffle=True,
                                    num_workers=4)

dataowner = DataOwner(train_loader, val_loader, test_loader)

# get weights for classes
label_wts = class_weight.compute_class_weight(
    class_weight='balanced',
    classes=np.unique([class_[0] for class_ in sunspots['class']]),
    y=[class_[0] for class_ in sunspots['class']])

label_wts = torch.Tensor(label_wts).to(device)

w_criterion = nn.CrossEntropyLoss(weight=label_wts)
criterion = nn.CrossEntropyLoss()
model = Net()

# we use kekas framework for learning (https://github.com/belskikh/kekas/)
keker = Keker(
    model=model,
    dataowner=dataowner,
    criterion=w_criterion,
    step_fn=step_fn,
    target_key="label",
    metrics={"acc": accuracy},
    # opt=torch.optim.Adam,
    # opt=torch.optim.SGD,
    # opt_params={"weight_decay": 1e-5}
    # opt_params={"momentum": 0.99}
    opt=AdaBound,
    opt_params={
Example #12
# 0.1307 / 0.3081 are the standard per-pixel mean / std of the MNIST training set
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.1307,), (0.3081,))])

train_dataset = datasets.MNIST('mnist_trainset', download=True, train=True, transform=transform)
test_dataset = datasets.MNIST('mnist_testset', download=True, train=False, transform=transform)

train_size = len(train_dataset)
test_size = len(test_dataset)

train_loader = DataLoader(train_dataset, batch_size=bs, num_workers=8,
                          shuffle=True, drop_last=False)
test_loader = DataLoader(test_dataset, batch_size=bs, num_workers=8,
                         shuffle=True, drop_last=False)

encoders = nn.ModuleList([Net(dims=[3, 2 * k, 2 * k, k])])
decoders = nn.ModuleList([Net(dims=[k, 2 * k, 2 * k, 10])])
loss_fn = nn.CrossEntropyLoss()


assert min(sqrt_num_nodes_list) >= 1
model = GENPlanarGrid(encoders=encoders, decoders=decoders)
# model = torch.load('logs/4x4-no-opt-e100m-model.pt')

mesh_list, mesh_params = create_mesh_list(
    num_datasets=1,
    sqrt_num_nodes_list=sqrt_num_nodes_list,
    initialization='random' if opt_nodes else 'uniform',
    copies_per_graph=copies_per_graph, device=device)
# mesh_list,mesh_params = (torch.load('logs/2x2-opt-e100m-mesh-list.pt'),
#                          torch.load('logs/2x2-opt-e100m-mesh-params.pt'))
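
The commented loads above restore a previously optimized mesh; the matching saves, as a sketch (the 'logs/' filenames are placeholders):

torch.save(mesh_list, 'logs/2x2-opt-e100m-mesh-list.pt')
torch.save(mesh_params, 'logs/2x2-opt-e100m-mesh-params.pt')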