Example #1
import rospy
import message_filters
import torch
import network
from sensor_msgs.msg import Image, CameraInfo
from geometry_msgs.msg import PointStamped


class BarbieDetector:  # class name assumed; the snippet shows only __init__
    def __init__(self):
        rospy.init_node('barbie_detector')
        # topics where we publish
        self.image_pub = rospy.Publisher("/barbie_detections",
                                         Image,
                                         queue_size=1)
        self.point_pub = rospy.Publisher("/barbie_point",
                                         PointStamped,
                                         queue_size=1)
        # subscribed topics
        self.image_subscriber = message_filters.Subscriber(
            "/camera/rgb/image_raw", Image)
        self.cam_info_subscriber = message_filters.Subscriber(
            "/camera/rgb/camera_info", CameraInfo)
        self.depth_subscriber = message_filters.Subscriber(
            "/camera/depth_registered/sw_registered/image_rect", Image)
        # time synchronizer
        self.ts = message_filters.ApproximateTimeSynchronizer([
            self.image_subscriber, self.depth_subscriber,
            self.cam_info_subscriber
        ], 5, 0.5)

        # network
        self.model = network.Net()
        weights_path = rospy.get_param('~weights_path', 'trained_weights')
        self.model.load_state_dict(torch.load(weights_path,
                                              map_location='cpu'))
        rospy.loginfo("Weights loaded from path: %s", weights_path)
        # callback registration
        self.ts.registerCallback(self.callback)
        print("Detector initialized")

        self.pbl = rospy.Publisher("framed", PointStamped, queue_size=10)
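The callback registered above is not part of this snippet. With ApproximateTimeSynchronizer it receives one message per subscriber, in the order they were passed to the synchronizer, so its skeleton would look roughly like this (a sketch; the actual detection logic is not shown):

    def callback(self, image_msg, depth_msg, cam_info_msg):
        # messages arrive matched to within the 0.5 s slop configured above
        rospy.logdebug("synchronized frame at %s", image_msg.header.stamp)
        # ... run self.model on the RGB frame, read the depth at each
        # detection, and publish via self.image_pub / self.point_pub ...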
Example #2
def train_wholenet(train_dataset, val_dataset, test_dataset):
    encoder_arch = [[14, 28], [28, 56], [56, 56], [56, 4]]
    decoder_arch = [[18, 36], [36, 72], [72, 72], [72, 7]]
    loss = nn.MSELoss()

    n_examples = len(train_dataset)
    model = network.Net(encoder_arch, decoder_arch)

    model.decoder.load_state_dict(torch.load("decoder.model"))
    model.encoder.load_state_dict(torch.load("encoder.model"))

    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

    batch_size = 500
    num_batches = n_examples // batch_size

    # plain mini-batch training: 2000 passes over contiguous slices
    for i in range(2000):
        cost = 0.
        for k in range(num_batches):
            start, end = k * batch_size, (k + 1) * batch_size
            cost += train(model, loss, optimizer, train_dataset[start:end])
        print("Epoch = {epoch}, cost = {le}".format(epoch=i + 1,
                                                    le=cost / num_batches))

        #predY = predict(model, val_dataset)
        #print("Epoch %d, cost = %f, acc = %.2f%%"
        #      % (i + 1, cost / num_batches, 100. * np.mean(predY == teY)))

    torch.save(model.state_dict(), './model')
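The train helper called in the loop is not defined in this example; its call site suggests a standard single-step routine. A minimal sketch under that assumption (the (input, target) unpacking is a guess about the dataset layout):

def train(model, loss, optimizer, batch):
    # hypothetical helper matching the call site above: one optimization
    # step over a slice of the dataset
    inputs, targets = batch  # assumes (input, target) pairs
    optimizer.zero_grad()
    output = model(inputs)
    cost = loss(output, targets)
    cost.backward()
    optimizer.step()
    return cost.item()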
Example #3
def main():
    b = BatchLoader("/media/storage/Data/CTReconstruction/LCTSC/Output")
    net = network.Net()
    net.cuda()
    losses = train(net, b, trainsteps=1500, epoch=6)
    plt.plot(losses)
    plt.show()
Example #4
def main():
    q = Queue()
    dataset = exoskeleton_dataset.ExoskeletonDataset(file="data/exo_data_3",
                                                     root_dir="/")
    train_dataset, val_dataset, test_dataset = dataset.GetDataset()

    t_udp = threading.Thread(target=udprecv, args=(q, ))
    t_udp.start()
    encoder_arch = [[14, 28], [28, 56], [56, 56], [56, 4]]
    decoder_arch = [[18, 36], [36, 72], [72, 72], [72, 7]]
    model = network.Net(encoder_arch, decoder_arch)

    #model.load_state_dict(torch.load('./model'))
    model.decoder.load_state_dict(torch.load("decoder.model"))
    model.encoder.load_state_dict(torch.load("encoder.model"))
    realtime_loss = nn.MSELoss()

    while True:

        string = q.get()
        tensor_data = string_to_tensor(string)

        target = tensor_data["target"].unsqueeze(dim=1)
        constrains = tensor_data["constrains"].unsqueeze(dim=1)
        master = tensor_data["master"].unsqueeze(dim=1)
        slave = tensor_data["slave"].unsqueeze(dim=1)

        # drop malformed packets before running the encoder
        if len(target) != 7:
            continue
        if len(constrains) != 4:
            continue
        if len(slave) != 7:
            continue

        tmp = torch.cat((slave, target), 0)
        y = model.encoder(torch.transpose(tmp, 0, 1))

        rl = realtime_loss(constrains, y)
        jc = y.detach().numpy()
        jc_c = constrains.detach().numpy()

        data_str = "%f,%f,%f,%f" % (jc_c[0], jc_c[1], jc_c[2], jc_c[3])

        print("calculated:{}".format(data_str))

        data_str = "%f,%f,%f,%f" % (jc[0][0], jc[0][1], jc[0][2], jc[0][3])

        print("predicted:{}".format(data_str))

        print(rl.detach())
        s.sendto(data_str.encode('utf-8'), send_addr)
Example #5
    def _init_model(self):

        torch.manual_seed(self.start_point_seed)
        net_par = {'i_dim': self._train_data.n_var,
                   'o_dim': 1,
                   'h_dim': 10,
                   'n_layers': 1}

        self._model = network.Net(net_par)
        self._optimizers = torch.optim.Adam(self._model.parameters(), lr=0.001)
        self._LR_multiplier_list = []
        self.logs = []
Example #6
def create_generator(opt):
    generator = network.Net(num_channels=opt.num_channels,
                            scale_factor=opt.scale_factor,
                            d=32, s=5, m=1)
    if opt.load_pre_train:

        if '.pkl' in opt.load_name:
            generator.load_state_dict(torch.load(opt.load_name))
        else:
            pretrained_net = torch.load(opt.load_name)
            load_dict(generator, pretrained_net)
        print('Generator is loaded!')
    else:
        # Init the network
        network.weights_init(generator, init_type=opt.init_type, init_gain=opt.init_gain)
        print('Generator is created!')
    
    return generator
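load_dict is referenced but not defined here. A common pattern for this kind of partial loading keeps only the pretrained entries whose names and shapes match the target network; a sketch assuming that behavior:

def load_dict(net, pretrained_state):
    # hypothetical sketch: keep only entries that exist in the target
    # network with matching tensor shapes, then load the merged state
    own_state = net.state_dict()
    filtered = {k: v for k, v in pretrained_state.items()
                if k in own_state and v.shape == own_state[k].shape}
    own_state.update(filtered)
    net.load_state_dict(own_state)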
Example #7
def plot(epochs):
    base_path = os.getcwd()
    net = network.Net()
    weights_dir = base_path + "/net_training/"
    for j in range(0, epochs):
        name = "net_epoch" + str(j)
        # load weights
        net.load_state_dict(torch.load(weights_dir + name))
        # set up evaluation mode of network
        net.eval()
        # set up data loader
        dataset_val = dataset.myDataset(base_path + "/ds/val", transform=network.ToTensor())
        valloader = torch.utils.data.DataLoader(dataset_val, batch_size=1, shuffle=False, num_workers=1)
        # move the network to the GPU if one is available
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        net.to(device)
        L = []
        O = []
        # process epoch
        print('epoch ' + str(j))
        for i, data in enumerate(valloader):
            input = data['image'].to(device)
            label = data['label']
            lab = label.numpy().flatten()
            output = net(input)
            out = output[0, :, :, :].detach().cpu().numpy().flatten()
            L.append(lab)
            O.append(out)

        L = np.concatenate(L)
        O = np.concatenate(O)

        # compute fpr, tpr
        fpr, tpr, thresholds = metrics.roc_curve(L, O)
        # plot roc for epoch
        plt.clf()
        plt.semilogx(fpr, tpr, color='darkorange', lw=2, label='ROC curve')
        plt.xlim([0.000001, 1.0])
        plt.ylim([0.0, 1])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Receiver operating characteristic example')
        plt.legend(loc="lower right")
        plt.savefig(weights_dir + name + ".png")
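If a single summary number per epoch is wanted, sklearn can compute the area under each curve; an optional addition to the plotting code above (same fpr/tpr as in the loop):

        # optional: annotate the legend with the area under the curve
        auc = metrics.auc(fpr, tpr)
        plt.semilogx(fpr, tpr, color='darkorange', lw=2,
                     label='ROC curve (AUC = %0.3f)' % auc)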
Example #8
def main():

    dataset = exoskeleton_dataset.ExoskeletonDataset(
        file="data/exo_data_3", root_dir="/")
    train_dataset, val_dataset, test_dataset = dataset.GetDataset()


    encoder_arch = [[14, 28], [28, 56], [56, 56], [56, 4]]
    decoder_arch = [[18, 36], [36, 72], [72, 72], [72, 7]]
    model = network.Net(encoder_arch, decoder_arch)


    model.load_state_dict(torch.load('./model'))
    #model.decoder.load_state_dict(torch.load("decoder.model"))
    #model.encoder.load_state_dict(torch.load("encoder.model"))
    realtime_loss = nn.MSELoss()

    dataset_size = train_dataset.size
    for k in range(dataset_size):
        test_data(model, train_dataset[k])
Example #9
def main():
    # Use a standard transformation on all datasets.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    tasks = [(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)]
    # Create task datasets and data loaders for both training and testing
    # CIFAR10 provides 50000 samples for training and 10000 for testing
    train_set = datasets.CIFAR10(data_path,
                                 train=True,
                                 download=True,
                                 transform=transform)
    test_set = datasets.CIFAR10(data_path,
                                train=False,
                                download=True,
                                transform=transform)
    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=batch_size)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size)

    net = network.Net()
    net.add_task(10)
    # move the net to the device we are training on
    net.to(device)
    net.set_tasks([0])

    # set the module in training mode;
    # this only affects certain modules, such as dropout, which behave differently
    net.train()
    # define the loss function that compares the outputs of the net to the desired output
    criterion = nn.CrossEntropyLoss()
    # define a stochastic gradient descent optimizer
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    train(net, train_loader, criterion, optimizer, num_epochs)
    net.eval()
    accuracy(net, test_loader)
    class_accuracy(net, test_loader)
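accuracy and class_accuracy are used but not defined in this example. A typical overall-accuracy loop, assuming the loader yields (image, label) batches and device is the global defined above:

def accuracy(net, loader):
    # hypothetical sketch of the evaluation helper used above
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in loader:
            images, labels = images.to(device), labels.to(device)
            outputs = net(images)
            predicted = outputs.argmax(dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy: %.2f%%' % (100.0 * correct / total))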
Example #10
                cv2.waitKey(0)

    def coord_transform_inv(self, anchors, boxes):
        anchors = anchors.astype(np.float32)
        anchors = np.reshape(anchors, [-1, 4])
        anchor_x = (anchors[:, 2] + anchors[:, 0]) * 0.5
        anchor_y = (anchors[:, 3] + anchors[:, 1]) * 0.5
        anchor_w = (anchors[:, 2] - anchors[:, 0]) + 1.0
        anchor_h = (anchors[:, 3] - anchors[:, 1]) + 1.0
        boxes = np.reshape(boxes, [-1, 4])
        boxes_x = boxes[:, 0] * anchor_w + anchor_x
        boxes_y = boxes[:, 1] * anchor_h + anchor_y
        boxes_w = np.exp(boxes[:, 2]) * anchor_w
        boxes_h = np.exp(boxes[:, 3]) * anchor_h
        coord_x1 = boxes_x - boxes_w * 0.5
        coord_y1 = boxes_y - boxes_h * 0.5
        coord_x2 = boxes_x + boxes_w * 0.5
        coord_y2 = boxes_y + boxes_h * 0.5
        coord_result = np.stack([coord_x1, coord_y1, coord_x2, coord_y2],
                                axis=1)
        return coord_result


if __name__ == '__main__':
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    net = network.Net(is_training=False)
    val_data = pascl.pascal_voc('test', fliped=False)
    test = Val_test(net, val_data)
    print('start testing')
    test.test_model()
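coord_transform_inv implements the standard inverse box transform from the R-CNN family: deltas (dx, dy, dw, dh) are decoded relative to anchor centers and sizes. A quick sanity check of the formulas with zero deltas:

import numpy as np

anchor = np.array([0.0, 0.0, 10.0, 10.0])               # x1, y1, x2, y2
cx, cy = (anchor[2] + anchor[0]) * 0.5, (anchor[3] + anchor[1]) * 0.5
w, h = anchor[2] - anchor[0] + 1.0, anchor[3] - anchor[1] + 1.0
# zero deltas recover the anchor, up to the half-pixel shift introduced
# by the "+ 1.0" width/height convention
print(cx - w * 0.5, cy - h * 0.5, cx + w * 0.5, cy + h * 0.5)
# -0.5 -0.5 10.5 10.5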
Example #11
import argparse
from collections import deque

import numpy as np
import torch
from torch.utils.data import DataLoader

import dataset
import network

parser = argparse.ArgumentParser()
parser.add_argument('--data', help='Location of dataset')
parser.add_argument('-m', '--load_path', '--m', help='Location of model')
parser.add_argument('-s', '--save_path', '--s', help='Where to save embeddings')
parser.add_argument('-a', '--arch')
parser.add_argument('--bs', type=int, default=64)
parser.add_argument('-n', '--n_saes', type=int)
args = parser.parse_args()
assert args.save_path is not None

# Make dataloader
dataset = dataset.XDataset(args.data)
dataloader = DataLoader(dataset, batch_size=args.bs, shuffle=False)

# Build model
master_net = network.Net(arch=[int(x) for x in args.arch.split()])
dump = torch.load(args.load_path)
master_net.load_state_dict(dump['state_dict'])
info('Loaded from %s' % args.load_path)

# Get encoder only
encoder = master_net.get_encoder()

# Get output embeddings
embeddings = torch.cat([encoder(batch) for batch in dataloader])

# Save embeddings
np.save(args.save_path, embeddings.detach().numpy())
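Since this script only extracts embeddings, wrapping the forward passes in torch.no_grad() would skip building the autograd graph and make the final detach() unnecessary; a small variant:

# inference-only variant of the embedding pass
with torch.no_grad():
    embeddings = torch.cat([encoder(batch) for batch in dataloader])
np.save(args.save_path, embeddings.numpy())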
Example #12
import numpy as np
import tensorflow as tf
import gym
import preprocess as pp
from gym.wrappers.monitor import Monitor
import network as tm
import torch
import os

os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

pretrained_model = './car_racing_largenet_p20_unstructured_finetuned.pth'

print("CUDA Available: ",torch.cuda.is_available())
device = torch.device("cuda" if (torch.cuda.is_available()) else "cpu")
model = tm.Net().double()
model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))

#model.summary()

env = gym.make('CarRacing-v0').unwrapped
env.reset()

env_act = np.array([[0, 1, 0]], dtype=np.float32)
model_state = np.zeros((1, 96, 96, 3))
processed_state = np.zeros((1, 96, 96, 1))
preprocessed_state_torch = torch.zeros((1, 96, 96, 1))

#For capturing video
#env = Monitor(env, "video-test2", force=True)
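The snippet ends before the control loop. A rough outline of how the pieces above would typically be combined; pp.preprocess's signature and the model's action layout are assumptions, since neither is shown here:

# hypothetical control loop, assuming pp.preprocess maps a raw frame to
# the (1, 96, 96, 1) layout prepared above
for _ in range(1000):
    state, reward, done, _ = env.step(env_act[0])
    processed_state = pp.preprocess(state)        # assumed signature
    obs = torch.from_numpy(processed_state).double()
    with torch.no_grad():
        env_act = model(obs).numpy()              # assumed action layout
    env.render()
    if done:
        env.reset()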
Example #13
    return image


def get_response():
    # block until the user accepts (space) or rejects (Esc) the detection
    while True:
        k = cv2.waitKey(1)
        if k == 27:
            print("rejected!")
            return False
        elif k == 32:
            print("accepted!")
            return True


weights_path = "net_epoch99"
model = network.Net()
model.load_state_dict(torch.load(weights_path, map_location='cpu'))

out_heat = {}

scales = [1]  #, 1.5, 2, 3]
s_i = 0
threshold_NN = 4

all_pics = []

for root, dirs, files in os.walk("val_set", topdown=False):
    for name in files:
        if name.endswith("jpg"):
            all_pics.append(os.path.join(root, name))
Example #14
def main(dataset_folder: str,
         net_hyper: Dict,
         fit_hyper: Dict,
         record_folder: str):

    # read data
    xdata, xlookup, ydata, ylookup, word_embedding = inout.read(dataset_folder)

    # convert
    x = convert.digitize_xdata(xdata, xlookup)
    y = convert.digitize_ydata(
        ydata, ylookup, fit_hyper['class_num'])

    # device
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    # create data bunch
    data_bunch = feed.create_data_bunch(
        x, y, fit_hyper['batch_size'], device, fit_hyper['is_formal'])

    # create net
    net = network.Net(net_hyper, word_embedding).to(device)

    # loss_fn
    loss_fn = loss.LossFn(balance_weight=fit_hyper['balance_weight'])

    # stop_fn; for nomc this is effectively unused, because nomc never
    # triggers a true early stop
    stop_fn = partial(EarlyStoppingCallback,
                      monitor='f1value08', mode='max',
                      patience=fit_hyper['patience'])

    # save fn
    # save_model_fn = partial(
    #     SaveModelCallback, monitor='f1value07', mode='max')
    # save_model_fn = partial(
    #     SaveModelCallback, every=500)

    # save prediction fn
    # save_prediction_fn = partial(SavePredictionCallback,
    #                              monitor='f1value07', mode='max',
    #                              ylookup=ylookup['tag_lookup'],
    #                              record_folder=record_folder)
    # save_prediction_fn = partial(SavePredictionCallback,
    #                              every=500,
    #                              ylookup=ylookup['tag_lookup'],
    #                              record_folder=record_folder)

    # metric fn
    (fake_metric,
     precision_01, recall_01, f1value_01,
     precision_02, recall_02, f1value_02,
     precision_03, recall_03, f1value_03,
     precision_04, recall_04, f1value_04,
     precision_05, recall_05, f1value_05,
     precision_06, recall_06, f1value_06,
     precision_07, recall_07, f1value_07,
     precision_08, recall_08, f1value_08,
     precision_09, recall_09, f1value_09) = metric.create_metrics(fit_hyper['is_topk'])

    # create learner
    learner = MyLearner(data_bunch, net,
                        opt_func=optim.Adam, loss_func=loss_fn,
                        metrics=[fake_metric,
                                 precision_01, recall_01, f1value_01,
                                 precision_02, recall_02, f1value_02,
                                 precision_03, recall_03, f1value_03,
                                 precision_04, recall_04, f1value_04,
                                 precision_05, recall_05, f1value_05,
                                 precision_06, recall_06, f1value_06,
                                 precision_07, recall_07, f1value_07,
                                 precision_08, recall_08, f1value_08,
                                 precision_09, recall_09, f1value_09],
                        true_wd=False, bn_wd=False,
                        wd=fit_hyper['weight_decay'], train_bn=False,
                        path=record_folder, model_dir='models',
                        callback_fns=[
                            stop_fn,
                            partial(SaveModelCallback, every=500),
                            partial(SavePredictionCallback, every=500, ylookup=ylookup['tag_lookup'], record_folder=record_folder)])
    # learner = MyLearner(data_bunch, net,
    #                     opt_func=optim.Adam, loss_func=loss_fn,
    #                     metrics=[fake_metric,
    #                              precision_01, recall_01, f1value_01,
    #                              precision_02, recall_02, f1value_02,
    #                              precision_03, recall_03, f1value_03,
    #                              precision_04, recall_04, f1value_04,
    #                              precision_05, recall_05, f1value_05,
    #                              precision_06, recall_06, f1value_06,
    #                              precision_07, recall_07, f1value_07,
    #                              precision_08, recall_08, f1value_08,
    #                              precision_09, recall_09, f1value_09],
    #                     true_wd=False, bn_wd=False,
    #                     wd=fit_hyper['weight_decay'], train_bn=False,
    #                     path=record_folder, model_dir='models',
    #                     callback_fns=[
    #                         stop_fn,
    #                         partial(SaveModelCallback, monitor='f1value01', mode='max'),
    #                         partial(SaveModelCallback, monitor='f1value02', mode='max'),
    #                         partial(SaveModelCallback, monitor='f1value03', mode='max'),
    #                         partial(SaveModelCallback, monitor='f1value04', mode='max'),
    #                         partial(SaveModelCallback, monitor='f1value05', mode='max'),
    #                         partial(SaveModelCallback, monitor='f1value06', mode='max'),
    #                         partial(SaveModelCallback, monitor='f1value07', mode='max'),
    #                         partial(SaveModelCallback, monitor='f1value08', mode='max'),
    #                         partial(SaveModelCallback, monitor='f1value09', mode='max'),
    #                         partial(SavePredictionCallback, monitor='f1value01', mode='max', ylookup=ylookup['tag_lookup'], record_folder=record_folder),
    #                         partial(SavePredictionCallback, monitor='f1value02', mode='max', ylookup=ylookup['tag_lookup'], record_folder=record_folder),
    #                         partial(SavePredictionCallback, monitor='f1value03', mode='max', ylookup=ylookup['tag_lookup'], record_folder=record_folder),
    #                         partial(SavePredictionCallback, monitor='f1value04', mode='max', ylookup=ylookup['tag_lookup'], record_folder=record_folder),
    #                         partial(SavePredictionCallback, monitor='f1value05', mode='max', ylookup=ylookup['tag_lookup'], record_folder=record_folder),
    #                         partial(SavePredictionCallback, monitor='f1value06', mode='max', ylookup=ylookup['tag_lookup'], record_folder=record_folder),
    #                         partial(SavePredictionCallback, monitor='f1value07', mode='max', ylookup=ylookup['tag_lookup'], record_folder=record_folder),
    #                         partial(SavePredictionCallback, monitor='f1value08', mode='max', ylookup=ylookup['tag_lookup'], record_folder=record_folder),
    #                         partial(SavePredictionCallback, monitor='f1value09', mode='max', ylookup=ylookup['tag_lookup'], record_folder=record_folder),
    #                         ])

    # start fit
    learner.fit(fit_hyper['epoch_num'],
                lr=fit_hyper['learning_rate'])

    # write data
    train_losses = [x.cpu().numpy().tolist()
                    for x in learner.recorder.losses]
    valid_losses = [x.tolist()
                    for x in learner.recorder.val_losses]
    prf_values = [x[1:]
                  for x in learner.recorder.metrics]
    inout.write(train_losses, valid_losses, prf_values, record_folder)
Example #15
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import network
import cv2
import os
from network_data_formation import data_processing, visualize_predictions_and_ground_truth

use_GPU = 0

net = network.Net()

if use_GPU:
    gpus = '0'
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    net = torch.nn.DataParallel(net).cuda()
else:
    device = torch.device('cpu')

weight_path = './weights/net.pth'
if os.path.isfile(weight_path):

    if use_GPU:
        net.load_state_dict(torch.load(weight_path))
    else:
        state_dict = torch.load(weight_path)
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            # strip the 'module.' prefix that DataParallel prepends to keys
            new_state_dict[k.replace('module.', '', 1)] = v
        net.load_state_dict(new_state_dict)
Example #16
def main(dataset_folder: str,
         net_hyper: Dict,
         fit_hyper: Dict,
         record_folder: str):

    # read data
    xdata, xlookup, ydata, ylookup, word_embedding = inout.read(dataset_folder)

    # convert
    x = convert.digitize_xdata(xdata, xlookup)
    y = convert.digitize_ydata(
        ydata, ylookup, fit_hyper['class_num'])

    # device
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    # create data bunch
    data_bunch = feed.create_data_bunch(
        x, y, fit_hyper['batch_size'], device, fit_hyper['is_formal'])

    # create net
    net = network.Net(net_hyper, word_embedding).to(device)

    # loss_fn
    loss_fn = loss.LossFn(class_num=fit_hyper['class_num'],
                          o_index=fit_hyper['o_index'],
                          balance_weight=fit_hyper['balance_weight'],
                          device=device)

    # stop_fn
    stop_fn = partial(EarlyStoppingCallback,
                      monitor='f1calculation', mode='max',
                      patience=fit_hyper['patience'])

    # save fn
    # save_model_fn = partial(
    #     SaveModelCallback, monitor='f1calculation', mode='max')
    save_model_fn = partial(
        SaveModelCallback, every=500)

    # save prediction fn
    # save_prediction_fn = partial(SavePredictionCallback,
    #                              monitor='f1calculation', mode='max',
    #                              ylookup=ylookup['tag_lookup'],
    #                              record_folder=record_folder)
    save_prediction_fn = partial(SavePredictionCallback,
                                 every=500,
                                 ylookup=ylookup['tag_lookup'],
                                 record_folder=record_folder)

    # metric fn
    (accuracy_fn, f1_fn, precision_fn,
     recall_fn) = metric.create_metrics(fit_hyper['o_index'])

    # create learner
    learner = MyLearner(data_bunch, net,
                        opt_func=optim.Adam, loss_func=loss_fn,
                        metrics=[accuracy_fn, f1_fn, precision_fn, recall_fn],
                        true_wd=False, bn_wd=False,
                        wd=fit_hyper['weight_decay'], train_bn=False,
                        path=record_folder, model_dir='models',
                        callback_fns=[stop_fn, save_model_fn,
                                      save_prediction_fn])
    # callbacks=[m_debuger])

    # start fit
    learner.fit(fit_hyper['epoch_num'],
                lr=fit_hyper['learning_rate'])

    # write data
    train_losses = [x.cpu().numpy().tolist()
                    for x in learner.recorder.losses]
    valid_losses = [x.tolist()
                    for x in learner.recorder.val_losses]
    acc_values = [x[0]
                  for x in learner.recorder.metrics]
    prf_values = [[x[1], x[2], x[3]]
                  for x in learner.recorder.metrics]
    inout.write(train_losses, valid_losses,
                acc_values, prf_values, record_folder)
Example #17
import torch

import dataset
import environment
import network
import dq_agent

# the original snippet uses `device` without defining it; the usual setup:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

data = dataset.Wiper_System_Dataset()
env = environment.Environment()
model = network.Net(env.n_features).to(device)
DQN = dq_agent.Deep_Q_Agent()

class Testing():
    def __init__(self, data, env, model, DQN):
        self.dataset_obj = data
        self._env = env
        self.model_obj = model
        self.dqn_obj = DQN
        
        self.iter_length = len(self.dataset_obj)
        self.test_accuracy_frac_same_action = []
        
    def run_test(self):
        self.model_obj.eval()
        action_true_list = []
        action_predict_list = []
        for idx in range(self.iter_length):
            state, true = self.dataset_obj[idx]
            predict = torch.argmax(self.dqn_obj.get_Q(state), dim=1)
            self.test_accuracy_frac_same_action.append(int(predict == true))
            action_true_list.append(true)
            action_predict_list.append(predict)
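run_test collects per-sample hits but never reduces them; the overall fraction of matching actions could be reported at the end of the method with one line (an assumed addition):

        # assumed follow-up at the end of run_test: overall hit fraction
        accuracy = sum(self.test_accuracy_frac_same_action) / self.iter_length
        print("fraction of matching actions: %.3f" % accuracy)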
        
Example #18
    def __init__(self):
        cnnNet = network.Net()
        cnnNet.apply(self.init_weights)

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
Example #19
    def __init__(self):
        self.net = network.Net().cuda()
Example #20
def main():
    # Use a standard transformation on all datasets.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    # Load the CIFAR10 training and test sets.
    train_set = datasets.CIFAR10(data_path,
                                 train=True,
                                 download=True,
                                 transform=transform)
    test_set = datasets.CIFAR10(data_path,
                                train=False,
                                download=True,
                                transform=transform)
    # Create task datasets and data loaders for both training and testing.
    tasks = [(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)]

    # define the datasets and data loaders for training
    task_train_dss = [FilteredDataset(train_set, classes) for classes in tasks]
    task_train_dls = [
        DataLoader(ds, batch_size=batch_size) for ds in task_train_dss
    ]

    # define the datasets and data loaders for validation
    task_test_dss = [FilteredDataset(test_set, classes) for classes in tasks]
    task_test_dls = [
        DataLoader(ds, batch_size=batch_size) for ds in task_test_dss
    ]

    net = network.Net()
    criterion = nn.CrossEntropyLoss()
    # define a stochastic gradient descent optimizer
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    net.train()
    initial_accuracy = []
    for i, task in enumerate(tasks):
        print('Network with task', i, 'with classes', classes[task[0]],
              classes[task[1]])
        net.add_task(len(task))
        net.to(device)
        net.set_tasks([i])
        net.train()
        train(net, task_train_dls[i], task, criterion, optimizer, num_epochs)
        initial_accuracy.append(task_accuracy(net, task_test_dls[i], task))

    for i in range(len(initial_accuracy)):
        writer.add_scalar("initial_accuracy", initial_accuracy[i], i + 1)

    net.eval()
    final_accuracy = []
    for i, task in enumerate(tasks):
        final_accuracy.append(task_accuracy(net, task_test_dls[i], task))
    for i in range(len(final_accuracy)):
        writer.add_scalar("final_accuracy", final_accuracy[i], i + 1)

    x = [1, 2, 3, 4, 5]

    plt.plot(x, initial_accuracy, color="red", marker="o", label="initial")
    plt.plot(x, final_accuracy, color="blue", marker="o", label="final")
    plt.legend()
    plt.show()

    forgetting = []

    for i in range(5):
        forgetting.append(initial_accuracy[i] - final_accuracy[i])
        writer.add_scalar("forgetting", forgetting[i], i + 1)
    writer.close()
    plt.plot(x, forgetting, label="forgetting")
    plt.legend()
    plt.show()
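FilteredDataset is used in this example but not defined anywhere in the collection. A minimal version that restricts a dataset to a subset of its classes might look like this (a sketch, assuming label-based filtering):

from torch.utils.data import Dataset

class FilteredDataset(Dataset):
    # hypothetical sketch: keep only the samples whose label is in `classes`
    def __init__(self, base, classes):
        self.base = base
        self.indices = [i for i, (_, y) in enumerate(base) if y in classes]

    def __len__(self):
        return len(self.indices)

    def __getitem__(self, i):
        return self.base[self.indices[i]]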