Exemplo n.º 1
0
    def __init__(self, config_path, dev_type=DataLoader.DEVICE_CAM):
        """Load the JSON config and wire up capture, frame buffer and email.

        config_path -- path to the JSON configuration file.
        dev_type    -- device type constant from DataLoader (Dataloader.py).
        """

        self.config_path = config_path
        self.config = None
        self.data = None
        self.email = None
        self.pictures = []

        # Contours below this area (in pixels^2) are ignored as noise.
        self.contour_area_restrict = 15000

        def load_config():
            # Parse JSON into nested namedtuples so values are readable
            # via attribute access (e.g. self.config.opencv.frames...).
            try:
                with open(self.config_path) as fh:
                    hook = lambda d: collections.namedtuple('Config', d.keys())(*d.values())
                    self.config = json.load(fh, object_hook=hook)
            except Exception as err:
                sys.stderr.write("Probably you do not have a proper json file or file is missing")
                raise err

        load_config()
        self.data = DataLoader(dev_type, self.config.cam)
        # Ring buffer of the last N binarized frames (N from config),
        # used for the running-average background model.
        self.deque = collections.deque(maxlen=self.config.opencv.frames.avarage)
        self.email = Email(self.config.email)
Exemplo n.º 2
0
def test_dataset(data):
    """Wrap the 'A'/'B' test images (plus 'A_paths') in a DataLoader.

    Returns the iterable produced by loader.load_data(), batched in pairs
    and not shuffled so output order matches the input paths.
    """
    batch = 2
    inputs, targets = data['A'], data['B']
    paths = data['A_paths']
    ds = MyDataset3(inputs, targets, paths)
    dl = DataLoader()
    dl.initialize(ds, batch, shuffle=False)
    return dl.load_data()
Exemplo n.º 3
0
def train_logistic_regression(n, d, n_epoch, batch_size, b_init, l_rate):
    """Train logistic regression via forward-mode AD and plain SGD.

    n, d       -- sample count and feature dimension passed to DataLoader.
    n_epoch    -- maximum number of passes over the data.
    batch_size -- mini-batch size.
    b_init     -- initial coefficient vector (real part of the dual tensor).
    l_rate     -- SGD learning rate.

    Returns (b_hist, func_val_hist, param_error, acc_hist): per-step
    coefficients, loss values, relative parameter error against the
    sklearn reference solution, and accuracy.
    """
    # Generate the data for a coefficient vector & init progress tracker!
    data_loader = DataLoader(n, d, batch_size, binary=True)
    b_hist, func_val_hist, param_error, acc_hist = [], [], [], []

    # Reference solution: unpenalized sklearn fit used to measure progress.
    # NOTE(review): penalty='none' is spelled penalty=None from sklearn 1.2
    # on (the string form was removed in 1.4) -- confirm installed version.
    logreg = LogisticRegression(penalty='none',
                                solver='lbfgs',
                                multi_class='multinomial')
    logreg.fit(data_loader.X, data_loader.y)
    norm_coeff = np.linalg.norm(logreg.coef_.ravel())

    b_dual = DualTensor(b_init, None)

    for epoch in range(n_epoch):
        # Shuffle the batch identities at beginning of each epoch
        data_loader.batch_shuffle()
        for batch_id in range(data_loader.num_batches):
            # Clear the gradient
            b_dual.zero_grad()

            # Select the current batch & perform "mini-forward" pass
            X, y = data_loader.get_batch_idx(batch_id)
            y_pred_1, y_pred_2 = forward(X, b_dual)

            # Calculate the forward AD - real = func, dual = deriv
            current_dual, acc = binary_cross_entropy_dual(
                y, y_pred_1, y_pred_2)

            # Perform grad step & append results to the placeholder list
            b_dual.real -= l_rate * np.array(current_dual.dual).flatten()
            b_hist.append(b_dual.real)

            func_val_hist.append(current_dual.real)

            param_error.append(
                np.linalg.norm(logreg.coef_.ravel() - b_hist[-1]) / norm_coeff)
            acc_hist.append(acc)

        # BUG FIX: guard the look-back -- with a single batch per epoch the
        # original param_error[-2] raised IndexError on the first epoch.
        if len(param_error) > 1 and np.abs(param_error[-1] - param_error[-2]) < 0.00001:
            break

        # (original `epoch % 1 == 0` was always true: report every epoch)
        print(
            "Accuracy: {} | Euclidean Param Norm: {} | fct min: {}".format(
                acc, param_error[-1], current_dual.real))
    return b_hist, func_val_hist, param_error, acc_hist
Exemplo n.º 4
0
def test(model, index=''):
    """Run the generator checkpoint `model` over ./images/test and save results.

    model -- path to a torch-saved generator.
    index -- tag embedded in the output filenames under ./res/img/.
    For each test image writes a side-by-side PNG of generated | input | target.
    """
    g = torch.load(model)
    batchSize = 2
    # data = load_images('./images/test', n_images=20)
    data = load_images('./images/test', -1)
    y_train, x_train = data['B'], data['A']
    print(data['A_paths'])
    dataSet = MyDataset(x_train, y_train)
    loader = DataLoader()
    loader.initialize(dataSet, batchSize, shuffle=False)
    dataset = loader.load_data()
    for step, (x, y) in enumerate(dataset):
        img = g(x)
        # Concatenate generated | input | target along the width axis.
        res = np.concatenate((img.data.cpu().numpy(), x.data.cpu().numpy(), y.data.cpu().numpy()), axis=3)
        for i in range(x.shape[0]):
            # BUG FIX: index by the nominal batch size for both the saved
            # file and the log line -- the original saved with
            # `step * x.shape[0] + i`, which collides with earlier names
            # (overwriting files) whenever the final batch is smaller.
            out_path = './res/img/res{}_{}.png'.format(index, step * batchSize + i)
            save_image(res[i], out_path)
            print(out_path)
Exemplo n.º 5
0
    t_test = ja_vocab.transform(ja_test_path, eos=True)

    def sort(x, t):
        """Sort the paired sequences by descending length of x's items.

        Returns new (x, t) lists reordered together; ties keep their
        original relative order (stable sort).
        """
        order = sorted(range(len(x)), key=lambda idx: len(x[idx]), reverse=True)
        return ([x[idx] for idx in order], [t[idx] for idx in order])

    (x_train, t_train) = sort(x_train, t_train)
    (x_val, t_val) = sort(x_val, t_val)
    (x_test, t_test) = sort(x_test, t_test)

    train_dataloader = DataLoader((x_train, t_train),
                                  batch_first=False,
                                  device=device)
    val_dataloader = DataLoader((x_val, t_val),
                                batch_first=False,
                                device=device)
    test_dataloader = DataLoader((x_test, t_test),
                                 batch_size=1,
                                 batch_first=False,
                                 device=device)
    '''
    2. モデルの構築
    '''
    depth_x = len(en_vocab.i2w)
    depth_t = len(ja_vocab.i2w)

    input_dim = depth_x
Exemplo n.º 6
0
from Dataloader import DataLoader
from sklearn import preprocessing
from sklearn.svm import SVC
import pandas as pd
import numpy as np


def countAnamoly(y_pred):
    """Count the predictions flagged as anomalous (equal to True).

    Prints the count (preserving the original behavior) and also returns
    it so callers can use the value programmatically.
    """
    # `== True` (not truthiness) is kept deliberately to preserve the
    # original semantics for non-boolean values.
    count = sum(1 for label in y_pred if label == True)  # noqa: E712
    # Number of predicted anomalies
    print(count)
    return count

# BUG FIX: the original bound the instance to the name `DataLoader`,
# shadowing the imported class; use a distinct lowercase name instead.
data_loader = DataLoader()
data = data_loader.get_dataset()

# Using SVM to create models with several trials
# Normalize features to [0, 1] so the RBF kernel weighs them equally.
X = data.drop('result', axis=1)
y = data['result']
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(X)
X = pd.DataFrame(x_scaled)

# Sweep the SVM regularization strength C over several magnitudes.
inputs = [0.01, 0.1, 1, 10, 100]
for i in inputs:
    svclassifier = SVC(kernel='rbf', C=i, class_weight="balanced")
    svclassifier.fit(X, y)
    y_pred = svclassifier.predict(X)
Exemplo n.º 7
0
def train_multiple_outputs(continue_train=False, Gmodel=None, Dmodel=None, index='00', id=0):
    """Adversarial training loop: generator g vs. discriminator d.

    continue_train -- when True, resume from the checkpoint paths below.
    Gmodel, Dmodel -- torch checkpoint paths; None means build a fresh net.
    index, id      -- tags embedded in checkpoint / log file names.
    Checkpoints, logs and test images are written under BASE_DIR.
    """
    data = load_images('./images/train', n_images=1000)
    y_train, x_train = data['B'], data['A']
    print(y_train.shape[0])
    dataSet = MyDataset(x_train, y_train)
    loader = DataLoader()
    loader.initialize(dataSet, BATCH_SIZE)
    dataset = loader.load_data()
    testData = load_images('./images/test', -1)
    testDataset = test_dataset(testData)
    print('continue_train: ', continue_train)
    if continue_train:
        if Gmodel is None:
            g = Gnet().double()
            print('creat new Gnet')
        else:
            g = torch.load(Gmodel)
            print('using ', Gmodel)
        # BUG FIX: the original tested `DataLoader == None` (comparing the
        # class itself, always False), so a fresh discriminator was never
        # built when Dmodel was omitted; test Dmodel instead.
        if Dmodel is None:
            d = NLayerDiscriminator().double()
            print('creat new Dnet')
        else:
            d = torch.load(Dmodel)
            print('using ', Dmodel)
        # dOnG = torch.load('DOnGnet')
    else:
        g = Gnet().double()
        print('creat new Gnet')
        d = NLayerDiscriminator().double()
        print('creat new Dnet')
        # dOnG = generator_containing_discriminator_multiple_outputs(Gnet=g, Dnet=d)
    g.cuda()
    d.cuda()
    # dOnG.cuda()
    optimizerG = torch.optim.Adam(g.parameters(), lr=LR)
    optimizerD = torch.optim.Adam(d.parameters(), lr=LR)
    loss_function = nn.MSELoss().cuda(device='0')
    l1Loss = nn.L1Loss().cuda(device='0')
    perceptauLoss = PerceptualLoss_v2().cuda(device='0')
    perceptauLoss.initialize(l1Loss)
    wassesrsteinLoss = WassesrsteinLoss().cuda(device='0')
    now = datetime.datetime.now()
    start = time.time()
    for i in range(8):
        GLoss = []
        DLoss = []
        for step, (x, y) in enumerate(dataset):
            x_f = g(x)
            # Discriminator: two updates per generator step (critic-style).
            for _ in range(2):
                dReal = d(y)
                dFalse = d(x_f)
                dLossReal = wassesrsteinLoss(dReal, outputTrueBatch)
                dLossFalse = wassesrsteinLoss(dFalse, outputFalseBatch)
                dLoss = 0.5 * (dLossReal + dLossFalse)

                optimizerD.zero_grad()
                dLoss.backward(retain_graph=True)
                optimizerD.step()
                DLoss.append(dLoss.data.cpu().numpy())
            # Generator: push d's verdict on fakes toward the "real" labels.
            dFalse = d(x_f)
            dLoss = loss_function(dFalse, outputTrueBatch)
            # pLoss = perceptauLoss(x_f, y)
            gLoss = dLoss
            optimizerG.zero_grad()
            gLoss.backward()
            optimizerG.step()
            GLoss.append(gLoss.data.cpu().numpy())
            # print('gloss: ', gLoss.data.cpu().numpy())

            # Periodic checkpoint, log line, and test-set visualization.
            if step % 10 == 0:
                print('-' * 10, ' ', id, ': Saving model\trun time: ',
                      int((time.time() - start) / 60), 'm', int((time.time() - start) % 60), 's ', '-' * 10)
                start = time.time()
                save_dir = os.path.join(BASE_DIR, '{}{}_{}'.format(now.month, now.day, index))
                if not os.path.exists(save_dir):
                    os.makedirs(save_dir)
                print('dloss: ', np.mean(DLoss), '\tploss: ', np.mean(GLoss))
                torch.save(g, os.path.join(save_dir, '{}_Gnet_{}'.format(index, id)))
                torch.save(d, os.path.join(save_dir, '{}_Dnet_{}'.format(index, id)))
                with open(os.path.join(save_dir, '{}_log.txt'.format(index)), 'a+') as f:
                    f.write('{}\t{}\t{}\r\n'.format(id, np.mean(DLoss), np.mean(GLoss)))
                # id = id + 1
                test_img(g, id, save_dir, testDataset)
                id = id + 1
Exemplo n.º 8
0
def train_multiple_outputs(continue_train=False, Gmodel=None, Dmodel=None, index='00', id=0):
    """WGAN-style training with a perceptual loss; tracks the best generator.

    continue_train -- resume from Gmodel/Dmodel checkpoints when provided.
    Gmodel, Dmodel -- torch checkpoint paths; None builds a fresh network.
    index, id      -- tags embedded in checkpoint / log file names.
    Saves periodic checkpoints plus a 'bestGloss' generator under BASE_DIR.
    """
    data = load_images('./images/train', n_images=1000)
    y_train, x_train = data['B'], data['A']
    print(y_train.shape[0])
    dataSet = MyDataset(x_train, y_train)
    loader = DataLoader()
    loader.initialize(dataSet, BATCH_SIZE)
    dataset = loader.load_data()
    print('continue_train: ', continue_train)
    testData = load_images('./images/test', -1)
    testDataset = test_dataset(testData)
    LOSS = 1000  # best (lowest) generator loss seen so far
    print(LR)
    if continue_train:
        if Gmodel is None:
            g = Gnet().double()
            print('creat new Gnet')
        else:
            g = torch.load(Gmodel)
            print('using ', Gmodel)
        # BUG FIX: the original tested `DataLoader == None` (comparing the
        # class itself, always False), so a fresh discriminator was never
        # built when Dmodel was omitted; test Dmodel instead.
        if Dmodel is None:
            d = NLayerDiscriminator().double()
            print('creat new Dnet')
        else:
            d = torch.load(Dmodel)
            print('using ', Dmodel)
        # dOnG = torch.load('DOnGnet')
    else:
        g = Gnet().double()
        print('creat new Gnet')
        d = NLayerDiscriminator().double()
        print('creat new Dnet')
        # dOnG = generator_containing_discriminator_multiple_outputs(Gnet=g, Dnet=d)
    g.cuda()
    d.cuda()
    # dOnG.cuda()
    optimizerG = torch.optim.Adam(g.parameters(), lr=LR, betas=(0.9, 0.999), eps=1e-8)
    optimizerD = torch.optim.Adam(d.parameters(), lr=LR, betas=(0.9, 0.999), eps=1e-8)
    # optimizerDOnG = torch.optim.Adam(dOnG.parameters(), lr=LR)
    loss_function = nn.MSELoss().cuda(device='0')
    l1Loss = nn.L1Loss().cuda(device='0')
    perceptauLoss = PerceptualLoss_v2().cuda(device='0')
    perceptauLoss.initialize(loss_function)
    wassesrsteinLoss = WassesrsteinLoss().cuda(device='0')
    now = datetime.datetime.now()
    start = time.time()
    for i in range(8):
        GLoss = []
        DLoss = []
        # (learning-rate decay experiment kept for reference)
        # if i % 2 == 0:
        #     lr = LR * (0.1 ** int(i/2))
        #     print('LR: ', lr)
        #     for para_group in optimizerG.param_groups:
        #         para_group['lr'] = lr
        #     for para_group in optimizerD.param_groups:
        #         para_group['lr'] = lr
        for step, (x, y) in enumerate(dataset):
            x_f = g(x)
            # Critic: five discriminator updates per generator step.
            for _ in range(5):
                dReal = d(y)
                dFalse = d(x_f)
                dLossReal = wassesrsteinLoss(dReal, outputTrueBatch)
                dLossFalse = wassesrsteinLoss(dFalse, outputFalseBatch)
                dLoss = 0.5 * (dLossReal + dLossFalse)

                optimizerD.zero_grad()
                dLoss.backward(retain_graph=True)
                optimizerD.step()
                DLoss.append(dLoss.data.cpu().numpy())
            # Generator: adversarial term + heavily weighted perceptual term.
            dFalse = d(x_f)
            dLoss = wassesrsteinLoss(dFalse, outputTrueBatch)
            pLoss = perceptauLoss(x_f, y)
            gLoss = 100 * pLoss + dLoss
            optimizerG.zero_grad()
            gLoss.backward()
            optimizerG.step()
            LOSS_NOW = 100 * pLoss.data.cpu().numpy()
            GLoss.append(LOSS_NOW)

            # Checkpoint whenever the generator loss beats the best so far.
            if gLoss.data.cpu().numpy() < LOSS:
                save_dir = os.path.join(BASE_DIR, '{}{}_{}'.format(now.month, now.day, index))
                if not os.path.exists(save_dir):
                    os.makedirs(save_dir)
                torch.save(g, os.path.join(save_dir, '{}_Gnet_{}_{}'.format(index, 'bestGloss', int(LOSS_NOW))))
                LOSS = LOSS_NOW
                print('best model saved!!\t', save_dir, '{}_Gnet_{}_{}'.format(index, 'bestGloss', LOSS_NOW))
                save_dir = os.path.join(BASE_DIR, '{}{}_{}'.format(now.month, now.day, index))
                if not os.path.exists(save_dir):
                    os.makedirs(save_dir)
                test_img(g, '{}_{}'.format('bestGloss', LOSS_NOW), save_dir, testDataset)

            # Periodic checkpoint, log line, and test-set visualization.
            if step % 10 == 0:
                print('-' * 10, ' ', id, ': Saving model\trun time: ',
                      int((time.time() - start) / 60), 'm', int((time.time() - start) % 60), 's ', '-' * 10)
                start = time.time()
                currentLoss = int(np.mean(GLoss))
                save_dir = os.path.join(BASE_DIR, '{}{}_{}'.format(now.month, now.day, index))
                if not os.path.exists(save_dir):
                    os.makedirs(save_dir)
                print('dloss: ', np.mean(DLoss), '\tploss: ', np.mean(GLoss))
                torch.save(g, os.path.join(save_dir, '{}_Gnet_{}_{}'.format(index, id, currentLoss)))
                torch.save(d, os.path.join(save_dir, '{}_Dnet_{}'.format(index, id)))
                with open(os.path.join(save_dir, '{}_log.txt'.format(index)), 'a+') as f:
                    f.write('{}\t{}\t{}\r\n'.format(id, np.mean(DLoss), np.mean(GLoss)))
                # id = id + 1
                test_img(g, id, save_dir, testDataset)
                id = id + 1
Exemplo n.º 9
0
 def val_dataloader(self):
     """Return a non-shuffled DataLoader over the held-out test split."""
     return DataLoader(
         self.test_dataset,
         batch_size=self.arg.batch_size,
         shuffle=False,
     )
Exemplo n.º 10
0
 def train_dataloader(self):
     """Return a shuffled DataLoader over the training split."""
     return DataLoader(
         self.train_dataset,
         batch_size=self.arg.batch_size,
         shuffle=True,
     )
Exemplo n.º 11
0
class Surveillance(object):
    """Motion-detecting surveillance service.

    Pulls frames from a DataLoader device, maintains a moving average of
    binarized frames as a background model, extracts large contours as
    motion, and periodically emails the captured snapshots (see run()).
    """

    def __init__(self, config_path, dev_type=DataLoader.DEVICE_CAM):
        """Load the JSON config and wire up capture, frame buffer and email.

        config_path -- path to the JSON configuration file.
        dev_type    -- device type constant from DataLoader (Dataloader.py).
        """

        self.config_path = config_path
        self.data = None 
        self.config = None 

        # snapshots queued for the next outgoing email
        self.pictures = list()
        self.email = None

        # contours below this area (in pixels^2) are ignored as noise
        self.contour_area_restrict = 15000

        def make_config_obj():
            # Parse JSON into nested namedtuples so config values can be
            # read with attribute access (self.config.opencv.frames...).
            try:
                with open(self.config_path) as config_file:
                    self.config = json.load(config_file,
                            object_hook=lambda d: collections.namedtuple('Config', d.keys())(*d.values())) 

            except Exception as e:
                sys.stderr.write("Probably you do not have a proper json file or file is missing")
                raise(e)


        make_config_obj()
        self.data = DataLoader(dev_type, self.config.cam)
        # ring buffer of the last N binarized frames (N from config),
        # used for the running-average background model
        self.deque = collections.deque(maxlen=self.config.opencv.frames.avarage) 
        self.email = Email(self.config.email)


    def run(self):
        """Main capture loop: detect motion and email snapshots.

        Iterates frames from the DataLoader until its generator is
        exhausted; emails accumulated snapshots once per config timeout.
        """

        avarage_frame = 0
        avarage_frame_init = False

        snapshot_time = datetime.datetime.now()
        reference_time = datetime.datetime.now()

        for dframe in self.data.get_data_frame():
            #cv2.imshow('Original', dframe)
            gray_scaled_frame = cv2.cvtColor(dframe, cv2.COLOR_BGR2GRAY)
    

            # Blur before thresholding to suppress sensor noise.
            gaussian_blur_frame = (cv2.GaussianBlur(gray_scaled_frame,
                                    (self.config.opencv.filter.GaussianBlur.mask.row,
                                    self.config.opencv.filter.GaussianBlur.mask.column), 0))

            #cv2.imshow('Gaussian blur', gaussian_blur_frame)

            # Otsu picks the binarization threshold automatically.
            _, binary_frame = cv2.threshold(gaussian_blur_frame, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            #cv2.imshow('Binary frame', binary_frame)

            # Initialization: fill the deque before any detection happens.
            if self.deque.maxlen > len(self.deque):
                print("Waiting for another frame")
                self.deque.append(binary_frame.astype('uint16'))
                continue

            elif self.deque.maxlen == len(self.deque)  and not avarage_frame_init:
                avarage_frame = sum(self.deque)/self.deque.maxlen
                avarage_frame_init = True
                continue

            avarage_frame = sum((self.deque))/self.deque.maxlen

            # Background subtraction: only moving objects remain.
            without_backgroung_frame =  avarage_frame - binary_frame

            #cv2.imshow('Pict without background', without_backgroung_frame)
    
            # Erode then dilate to drop speckles and merge nearby blobs.
            erode_frame = cv2.erode(without_backgroung_frame.astype(np.uint8), np.ones((11, 11), np.uint8), iterations=1)
            dilate_frame = cv2.dilate(erode_frame, np.ones((15, 15), np.uint8), iterations=5)

            #cv2.imshow('Erode', erode_frame)
            #cv2.imshow('Dilate', dilate_frame)

            # NOTE(review): 3-value unpack assumes OpenCV 3.x; OpenCV 4.x
            # returns only (contours, hierarchy) -- confirm installed version.
            img, contours,hierarchy = cv2.findContours(dilate_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
            #cv2.imshow('Output with contours',cv2.drawContours(dframe.copy() , contours, -1, (0, 255, 0), 3))
   
            snapshot_time = datetime.datetime.now()
            

            for contour in contours:
                # skip contours too small to be interesting motion
                if cv2.contourArea(contour) < self.contour_area_restrict:
                    continue


                cnt = np.array(contour).reshape((-1,1,2)).astype(np.int32)
                cv2.drawContours(dframe, cnt, -1, (0, 255,255), 2)
                cv2.imshow("omg2", dframe)
                # keep the annotated frame as an in-memory PNG
                img_file = BytesIO(cv2.imencode('.png', dframe)[1]) 
            
                
                self.pictures.append(img_file)
                    
            # Email the accumulated snapshots once per configured timeout.
            if (snapshot_time - reference_time).seconds > int(self.config.email.user.timeout) and len(self.pictures):
                #send email
                self.email.send(self.pictures)

                reference_time = snapshot_time
                self.pictures = []



            self.deque.append(binary_frame.astype('uint16'))
            avarage_frame = sum(self.deque)/self.deque.maxlen
Exemplo n.º 12
0
def main(arg):
    """Train ('train') or evaluate ('predict') the part-disentanglement Model.

    arg is a dot-accessible config object; fields read here include
    batch_size, mode, name, load_from_ckpt, lr, L_inv_scal, epochs, gpu,
    dataset, reconstr_dim, and (predict mode) the augmentation scalars.
    """
    # Set random seeds for reproducibility
    torch.manual_seed(42)
    torch.cuda.manual_seed(42)
    np.random.seed(42)
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(42)
    # NOTE(review): rng is never used in the rest of this function.
    rng = np.random.RandomState(42)

    # Get args
    bn = arg.batch_size
    mode = arg.mode
    name = arg.name
    load_from_ckpt = arg.load_from_ckpt
    lr = arg.lr
    L_inv_scal = arg.L_inv_scal
    epochs = arg.epochs
    device = torch.device(
        'cuda:' + str(arg.gpu[0]) if torch.cuda.is_available() else 'cpu')
    arg.device = device

    # Load Datasets and DataLoader
    dataset = get_dataset(arg.dataset)
    if arg.dataset == 'pennaction':
        # pennaction has no fixed split: restrict to six actions, then 80/20.
        init_dataset = dataset(size=arg.reconstr_dim,
                               action_req=[
                                   "tennis_serve", "tennis_forehand",
                                   "baseball_pitch", "baseball_swing",
                                   "jumping_jacks", "golf_swing"
                               ])
        splits = [
            int(len(init_dataset) * 0.8),
            len(init_dataset) - int(len(init_dataset) * 0.8)
        ]
        train_dataset, test_dataset = torch.utils.data.random_split(
            init_dataset, splits)
    else:
        train_dataset = dataset(size=arg.reconstr_dim, train=True)
        test_dataset = dataset(size=arg.reconstr_dim, train=False)
    train_loader = DataLoader(train_dataset,
                              batch_size=bn,
                              shuffle=True,
                              num_workers=4)
    test_loader = DataLoader(test_dataset,
                             batch_size=bn,
                             shuffle=False,
                             num_workers=4)

    if mode == 'train':
        # Make new directory for checkpoints and summaries
        model_save_dir = '../results/' + name
        if not os.path.exists(model_save_dir):
            os.makedirs(model_save_dir)
            os.makedirs(model_save_dir + '/summary')

        # Save Hyperparameters
        write_hyperparameters(arg.toDict(), model_save_dir)

        # Define Model
        model = Model(arg).to(device)
        if load_from_ckpt:
            model = load_model(model, model_save_dir, device).to(device)
        print(f'Number of Parameters: {count_parameters(model)}')

        # Define Optimizer
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)

        # Log with wandb
        wandb.init(project='Disentanglement', config=arg, name=arg.name)
        wandb.watch(model, log='all')

        # Make Training
        with torch.autograd.set_detect_anomaly(False):
            for epoch in range(epochs + 1):
                # Train on Train Set
                model.train()
                model.mode = 'train'
                for step, (original, keypoints) in enumerate(train_loader):
                    # L_sep is zeroed after the first epoch (warm-up only).
                    if epoch != 0:
                        model.L_sep = 0.
                    original, keypoints = original.to(device), keypoints.to(
                        device)
                    image_rec, reconstruct_same_id, loss, rec_loss, transform_loss, precision_loss, mu, L_inv, mu_original = model(
                        original)
                    mu_norm = torch.mean(torch.norm(
                        mu, p=1, dim=2)).cpu().detach().numpy()
                    L_inv_norm = torch.mean(
                        torch.linalg.norm(L_inv, ord='fro',
                                          dim=[2, 3])).cpu().detach().numpy()
                    # Track Mean and Precision Matrix
                    wandb.log({"Part Means": mu_norm})
                    wandb.log({"Precision Matrix": L_inv_norm})
                    # Zero out gradients, backprop, and update
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                    # Track Loss
                    wandb.log({"Training Loss": loss})
                    # Track Metric
                    score = keypoint_metric(mu_original, keypoints)
                    wandb.log({"Metric Train": score})

                # Evaluate on Test Set
                model.eval()
                val_score = torch.zeros(1)
                val_loss = torch.zeros(1)
                for step, (original, keypoints) in enumerate(test_loader):
                    with torch.no_grad():
                        original, keypoints = original.to(
                            device), keypoints.to(device)
                        image_rec, reconstruct_same_id, loss, rec_loss, transform_loss, precision_loss, mu, L_inv, mu_original = model(
                            original)
                        # Track Loss and Metric
                        score = keypoint_metric(mu_original, keypoints)
                        val_score += score.cpu()
                        val_loss += loss.cpu()

                # Average the accumulated sums over the number of batches.
                val_loss = val_loss / (step + 1)
                val_score = val_score / (step + 1)
                wandb.log({"Evaluation Loss": val_loss})
                wandb.log({"Metric Validation": val_score})

                # Track Progress & Visualization (first test batch only)
                for step, (original, keypoints) in enumerate(test_loader):
                    with torch.no_grad():
                        model.mode = 'predict'
                        original, keypoints = original.to(
                            device), keypoints.to(device)
                        # NOTE(review): part_maps is unpacked twice here, so
                        # the first value is overwritten -- confirm the
                        # intended return signature of Model in predict mode.
                        original_part_maps, mu_original, image_rec, part_maps, part_maps, reconstruction = model(
                            original)
                        # img = visualize_predictions(original, original_part_maps, keypoints, reconstruction, image_rec[:original.shape[0]],
                        #                    image_rec[original.shape[0]:], part_maps[original.shape[0]:], part_maps[:original.shape[0]],
                        # #                    L_inv_scal, model_save_dir + '/summary/', epoch, device, show_labels=False)
                        # if epoch % 5 == 0:
                        #     wandb.log({"Summary_" + str(epoch): [wandb.Image(img)]})
                        save_model(model, model_save_dir)

                        if step == 0:
                            break
                # Decrements
                # model.L_sep = arg.sig_decr * model.L_sep

    elif mode == 'predict':
        # Make Directory for Predictions
        model_save_dir = '../results/' + arg.dataset + '/' + name
        # Don't use Transformations (disable all augmentation)
        arg.tps_scal = 0.
        arg.rot_scal = 0.
        arg.off_scal = 0.
        arg.scal_var = 0.
        arg.augm_scal = 1.
        arg.contrast = 0.
        arg.brightness = 0.
        arg.saturation = 0.
        arg.hue = 0.

        # Load Model and Dataset
        model = Model(arg).to(device)
        model = load_model(model, model_save_dir, device)
        model.eval()

        # Log with wandb
        # wandb.init(project='Disentanglement', config=arg, name=arg.name)
        # wandb.watch(model, log='all')

        # Predict on Dataset
        val_score = torch.zeros(1)
        for step, (original, keypoints) in enumerate(test_loader):
            with torch.no_grad():
                original, keypoints = original.to(device), keypoints.to(device)
                # NOTE(review): model(original) is unpacked into 8 values
                # here but 9 in the training branch -- confirm Model's
                # per-mode return signatures.
                ground_truth_images, img_reconstr, mu, L_inv, part_map_norm, heat_map, heat_map_norm, total_loss = model(
                    original)
                score, mu_new, L_inv, part_map_norm_new, heat_map_new = keypoint_metric(
                    mu, keypoints, L_inv, part_map_norm, heat_map,
                    arg.reconstr_dim)
                if step == 0:
                    img = visualize_predictions(original, img_reconstr, mu_new,
                                                part_map_norm_new,
                                                heat_map_new, mu,
                                                part_map_norm, heat_map,
                                                model_save_dir)
                # wandb.log({"Prediction": [wandb.Image(img)]})
                val_score += score.cpu()

        val_score = val_score / (step + 1)
        print("Validation Score: ", val_score)
# 4. Print the results for accuracy and other performance parameters.
# 5. Repeat steps 2 to 4 for other data models.
# ****************************************************************************************************************

from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfVectorizer
from Dataloader import DataLoader
from Preparedata import DataPreparer
from Classification import Classification
from sklearn.feature_extraction.text import CountVectorizer

# Name of dataset file to be placed in the working directory of the script
inputFile = "tweet_poll_event_dataset.xlsx"

# Step 1: load both candidates' worksheets from the Excel dataset.
data_loader = DataLoader(inputFile).loadData()

# Step 2: clean each candidate's tweet sheet.
prepare_data_obama = DataPreparer(data_loader.sheet_obama)
prepare_data_obama.clean_data()
prepare_data_romney = DataPreparer(data_loader.sheet_romney)
prepare_data_romney.clean_data()

# Processing Obama data
# Vectorize the tweets to create a sparse matrix with words as features.
# Experiments show that using unigram features increases accuracy for the TF-IDF vectorizer.
# BUG FIX: the original used a Python 2 print statement, which is a
# SyntaxError under Python 3; the parenthesized call works in both.
print(":::: Obama Data ::::")
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5)
prepare_data_obama.createTrain_TestSet()
prepare_data_obama.vectorizeData(vectorizer)
Exemplo n.º 14
0
def main(arg):
    """Train ('train' mode) or evaluate ('predict' mode) the keypoint model.

    *arg* supplies all hyper-parameters (batch_size, mode, name, lr, epochs,
    gpu list, dataset, reconstr_dim, weight_decay, ...).  Checkpoints and
    visualizations are written to '../results/<arg.dataset>/<arg.name>';
    losses and metrics are logged to wandb.
    """
    # Set random seeds
    torch.manual_seed(42)
    torch.cuda.manual_seed(42)
    np.random.seed(42)
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(42)  # NOTE(review): duplicate of the call above
    rng = np.random.RandomState(42)  # NOTE(review): unused in this function

    # Get args
    bn = arg.batch_size
    mode = arg.mode
    name = arg.name
    load_from_ckpt = arg.load_from_ckpt
    lr = arg.lr
    epochs = arg.epochs
    # Run on the first GPU listed in arg.gpu when CUDA is available.
    device = torch.device(
        'cuda:' + str(arg.gpu[0]) if torch.cuda.is_available() else 'cpu')
    arg.device = device

    # Load Datasets and DataLoader
    if arg.dataset != "mix":
        dataset = get_dataset(arg.dataset)
    if arg.dataset == 'pennaction':
        # init_dataset = dataset(size=arg.reconstr_dim, action_req=["tennis_serve", "tennis_forehand", "baseball_pitch",
        #                                                           "baseball_swing", "jumping_jacks", "golf_swing"])
        init_dataset = dataset(size=arg.reconstr_dim)
        # 80/20 train/test split, seeded for reproducibility.
        splits = [
            int(len(init_dataset) * 0.8),
            len(init_dataset) - int(len(init_dataset) * 0.8)
        ]
        train_dataset, test_dataset = random_split(
            init_dataset, splits, generator=torch.Generator().manual_seed(42))
    elif arg.dataset == 'deepfashion':
        # DeepFashion provides its own train/test split via the `train` flag.
        train_dataset = dataset(size=arg.reconstr_dim, train=True)
        test_dataset = dataset(size=arg.reconstr_dim, train=False)
    elif arg.dataset == 'human36':
        # Same 80/20 split strategy as PennAction.
        init_dataset = dataset(size=arg.reconstr_dim)
        splits = [
            int(len(init_dataset) * 0.8),
            len(init_dataset) - int(len(init_dataset) * 0.8)
        ]
        train_dataset, test_dataset = random_split(
            init_dataset, splits, generator=torch.Generator().manual_seed(42))
    elif arg.dataset == 'mix':
        # Combine several datasets into one training set.
        # add pennaction
        dataset_pa = get_dataset("pennaction")
        init_dataset_pa = dataset_pa(size=arg.reconstr_dim,
                                     action_req=[
                                         "tennis_serve", "tennis_forehand",
                                         "baseball_pitch", "baseball_swing",
                                         "jumping_jacks", "golf_swing"
                                     ],
                                     mix=True)
        splits_pa = [
            int(len(init_dataset_pa) * 0.8),
            len(init_dataset_pa) - int(len(init_dataset_pa) * 0.8)
        ]
        train_dataset_pa, test_dataset_pa = random_split(
            init_dataset_pa,
            splits_pa,
            generator=torch.Generator().manual_seed(42))
        # add deepfashion
        dataset_df = get_dataset("deepfashion")
        train_dataset_df = dataset_df(size=arg.reconstr_dim,
                                      train=True,
                                      mix=True)
        test_dataset_df = dataset_df(size=arg.reconstr_dim,
                                     train=False,
                                     mix=True)
        # add human36
        dataset_h36 = get_dataset("human36")
        init_dataset_h36 = dataset_h36(size=arg.reconstr_dim, mix=True)
        splits_h36 = [
            int(len(init_dataset_h36) * 0.8),
            len(init_dataset_h36) - int(len(init_dataset_h36) * 0.8)
        ]
        train_dataset_h36, test_dataset_h36 = random_split(
            init_dataset_h36,
            splits_h36,
            generator=torch.Generator().manual_seed(42))
        # Concatinate all
        # NOTE(review): the PennAction splits built above are NOT included in
        # the concatenation — confirm whether excluding them is intentional.
        train_datasets = [train_dataset_df, train_dataset_h36]
        test_datasets = [test_dataset_df, test_dataset_h36]
        train_dataset = ConcatDataset(train_datasets)
        test_dataset = ConcatDataset(test_datasets)

    train_loader = DataLoader(train_dataset,
                              batch_size=bn,
                              shuffle=True,
                              num_workers=4)
    # NOTE(review): the test loader is shuffled as well — confirm this is
    # wanted for evaluation.
    test_loader = DataLoader(test_dataset,
                             batch_size=bn,
                             shuffle=True,
                             num_workers=4)

    if mode == 'train':
        # Make new directory
        model_save_dir = '../results/' + arg.dataset + '/' + name
        if not os.path.exists(model_save_dir):
            os.makedirs(model_save_dir)
            os.makedirs(model_save_dir + '/summary')

        # Save Hyperparameters
        write_hyperparameters(arg.toDict(), model_save_dir)

        # Define Model
        model = Model(arg)
        if len(arg.gpu) > 1:
            # Multi-GPU training via DataParallel over all listed devices.
            model = torch.nn.DataParallel(model, device_ids=arg.gpu)
        model.to(device)
        if load_from_ckpt:
            # Resume from the checkpoint stored in model_save_dir.
            model = load_model(model, model_save_dir, device).to(device)
        # Dataparallel
        print(arg.gpu)
        print(f'Number of Parameters: {count_parameters(model)}')

        # Definde Optimizer
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=lr,
                                     weight_decay=arg.weight_decay)
        # Shrink the LR by 5x when the validation score plateaus.
        scheduler = ReduceLROnPlateau(optimizer,
                                      factor=0.2,
                                      threshold=1e-4,
                                      patience=6)

        # Log with wandb
        wandb.init(project='Disentanglement', config=arg, name=arg.name)
        wandb.watch(model, log='all')

        # Make Training
        with torch.autograd.set_detect_anomaly(False):
            for epoch in range(epochs + 1):
                # Train on Train Set
                model.train()
                # model.mode = 'train'
                for step, (original, keypoints) in enumerate(train_loader):
                    # Actual batch size (the last batch may be smaller).
                    bn = original.shape[0]
                    original, keypoints = original.to(device), keypoints.to(
                        device)
                    # Forward Pass
                    ground_truth_images, img_reconstr, mu, L_inv, part_map_norm, heat_map, heat_map_norm, total_loss = model(
                        original)
                    # Track Mean and Precision Matrix
                    mu_norm = torch.mean(torch.norm(
                        mu[:bn], p=1, dim=2)).cpu().detach().numpy()
                    L_inv_norm = torch.mean(
                        torch.linalg.norm(L_inv[:bn], ord='fro',
                                          dim=[2, 3])).cpu().detach().numpy()
                    wandb.log({"Part Means": mu_norm})
                    wandb.log({"Precision Matrix": L_inv_norm})
                    # Zero out gradients
                    optimizer.zero_grad()
                    total_loss.backward()
                    optimizer.step()
                    # Track Loss
                    wandb.log({"Training Loss": total_loss.cpu()})
                    # Track Metric
                    # NOTE: keypoint_metric also returns mu/L_inv/maps, which
                    # overwrite the model outputs from here on.
                    score, mu, L_inv, part_map_norm, heat_map = keypoint_metric(
                        mu, keypoints, L_inv, part_map_norm, heat_map,
                        arg.reconstr_dim)
                    wandb.log({"Metric Train": score})
                    # Track progress
                    # Every 10000 steps: visualize one test batch and save a
                    # checkpoint (only when the batch has at least 4 samples).
                    if step % 10000 == 0 and bn >= 4:
                        for step_, (original,
                                    keypoints) in enumerate(test_loader):
                            with torch.no_grad():
                                original, keypoints = original.to(
                                    device), keypoints.to(device)
                                ground_truth_images, img_reconstr, mu, L_inv, part_map_norm,\
                                heat_map, heat_map_norm, total_loss = model(original)
                                # Visualize Results
                                score, mu, L_inv, part_map_norm, heat_map = keypoint_metric(
                                    mu, keypoints, L_inv, part_map_norm,
                                    heat_map, arg.reconstr_dim)
                                img = visualize_results(
                                    ground_truth_images, img_reconstr, mu,
                                    L_inv, part_map_norm, heat_map, keypoints,
                                    model_save_dir + '/summary/', epoch,
                                    arg.background)
                                wandb.log({
                                    "Summary at step" + str(step):
                                    [wandb.Image(img)]
                                })
                                save_model(model, model_save_dir)
                                # Only the first test batch is visualized.
                                if step_ == 0:
                                    break

                # Evaluate on Test Set
                model.eval()
                val_score = torch.zeros(1)
                val_loss = torch.zeros(1)
                for step, (original, keypoints) in enumerate(test_loader):
                    with torch.no_grad():
                        original, keypoints = original.to(
                            device), keypoints.to(device)
                        ground_truth_images, img_reconstr, mu, L_inv, part_map_norm, heat_map, heat_map_norm, total_loss = model(
                            original)
                        # Track Loss and Metric
                        score, mu, L_inv, part_map_norm, heat_map = keypoint_metric(
                            mu, keypoints, L_inv, part_map_norm, heat_map,
                            arg.reconstr_dim)
                        val_score += score.cpu()
                        val_loss += total_loss.cpu()

                # Average over the number of test batches.
                val_loss = val_loss / (step + 1)
                val_score = val_score / (step + 1)
                # Track the best (lowest) validation score so far.
                if epoch == 0:
                    best_score = val_score
                if val_score <= best_score:
                    best_score = val_score
                # NOTE(review): the checkpoint is saved every epoch, not only
                # on a best_score improvement — confirm intent.
                save_model(model, model_save_dir)
                scheduler.step(val_score)
                wandb.log({"Evaluation Loss": val_loss})
                wandb.log({"Metric Validation": val_score})

                # Track Progress & Visualization
                for step, (original, keypoints) in enumerate(test_loader):
                    with torch.no_grad():
                        original, keypoints = original.to(
                            device), keypoints.to(device)
                        ground_truth_images, img_reconstr, mu, L_inv, part_map_norm, heat_map, heat_map_norm, total_loss = model(
                            original)
                        score, mu, L_inv, part_map_norm, heat_map = keypoint_metric(
                            mu, keypoints, L_inv, part_map_norm, heat_map,
                            arg.reconstr_dim)
                        img = visualize_results(ground_truth_images,
                                                img_reconstr, mu, L_inv,
                                                part_map_norm, heat_map,
                                                keypoints,
                                                model_save_dir + '/summary/',
                                                epoch, arg.background)
                        wandb.log(
                            {"Summary_" + str(epoch): [wandb.Image(img)]})
                        # Only the first test batch is visualized per epoch.
                        if step == 0:
                            break

    elif mode == 'predict':
        # Make Directory for Predictions
        model_save_dir = '../results/' + arg.dataset + '/' + name
        # Dont use Transformations
        # Disable all spatial/appearance augmentations for evaluation.
        arg.tps_scal = 0.
        arg.rot_scal = 0.
        arg.off_scal = 0.
        arg.scal_var = 0.
        arg.augm_scal = 1.
        arg.contrast = 0.
        arg.brightness = 0.
        arg.saturation = 0.
        arg.hue = 0.

        # Load Model and Dataset
        model = Model(arg).to(device)
        model = load_model(model, model_save_dir, device)
        model.eval()

        # Log with wandb
        # wandb.init(project='Disentanglement', config=arg, name=arg.name)
        # wandb.watch(model, log='all')

        # Predict on Dataset
        val_score = torch.zeros(1)
        for step, (original, keypoints) in enumerate(test_loader):
            with torch.no_grad():
                original, keypoints = original.to(device), keypoints.to(device)
                ground_truth_images, img_reconstr, mu, L_inv, part_map_norm, heat_map, heat_map_norm, total_loss = model(
                    original)
                score, mu_new, L_inv, part_map_norm_new, heat_map_new = keypoint_metric(
                    mu, keypoints, L_inv, part_map_norm, heat_map,
                    arg.reconstr_dim)
                # Visualize only the first batch.
                if step == 0:
                    img = visualize_predictions(original, img_reconstr, mu_new,
                                                part_map_norm_new,
                                                heat_map_new, mu,
                                                part_map_norm, heat_map,
                                                model_save_dir)
                # wandb.log({"Prediction": [wandb.Image(img)]})
                val_score += score.cpu()

        # Average score over all test batches.
        val_score = val_score / (step + 1)
        print("Validation Score: ", val_score)
Exemplo n.º 15
0
def main(arg):
    """Train ('train') or predict ('predict') the shape/appearance model.

    Uses the DeepFashion dataset (see load_deep_fashion_dataset).  Each batch
    is augmented into a spatially transformed view (thin-plate spline) and an
    appearance-transformed view (color jitter); the model consumes all three.
    Checkpoints and visualizations go to '../results/<arg.name>'; losses are
    logged to wandb.
    """
    # Set random seeds
    torch.manual_seed(7)
    torch.cuda.manual_seed(7)
    np.random.seed(7)

    # Get args
    bn = arg.bn
    mode = arg.mode
    name = arg.name
    load_from_ckpt = arg.load_from_ckpt
    lr = arg.lr
    epochs = arg.epochs
    # NOTE(review): here arg.gpu is used as a single id (no [0] indexing),
    # unlike the other trainer in this file — confirm the expected type.
    device = torch.device('cuda:' +
                          str(arg.gpu) if torch.cuda.is_available() else 'cpu')
    arg.device = device

    if mode == 'train':
        # Make new directory
        model_save_dir = '../results/' + name
        if not os.path.exists(model_save_dir):
            os.makedirs(model_save_dir)
            os.makedirs(model_save_dir + '/summary')

        # Save Hyperparameters
        write_hyperparameters(arg.toDict(), model_save_dir)

        # Define Model & Optimizer
        model = Model(arg).to(device)
        if load_from_ckpt:
            model = load_model(model, model_save_dir).to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)

        # Log with wandb
        wandb.init(project='Disentanglement', config=arg, name=arg.name)
        wandb.watch(model, log='all')
        # Load Datasets and DataLoader
        train_data, test_data = load_deep_fashion_dataset()
        train_dataset = ImageDataset(np.array(train_data))
        test_dataset = ImageDataset(np.array(test_data))
        train_loader = DataLoader(train_dataset,
                                  batch_size=bn,
                                  shuffle=True,
                                  num_workers=4)
        test_loader = DataLoader(test_dataset, batch_size=bn, num_workers=4)

        # Make Training
        with torch.autograd.set_detect_anomaly(False):
            for epoch in range(epochs + 1):
                # Train on Train Set
                model.train()
                model.mode = 'train'
                for step, original in enumerate(train_loader):
                    original = original.to(device)
                    # Make transformations
                    # Spatial view: random thin-plate-spline warp of the batch.
                    tps_param_dic = tps_parameters(original.shape[0], arg.scal,
                                                   arg.tps_scal, arg.rot_scal,
                                                   arg.off_scal, arg.scal_var,
                                                   arg.augm_scal)
                    coord, vector = make_input_tps_param(tps_param_dic)
                    coord, vector = coord.to(device), vector.to(device)
                    image_spatial_t, _ = ThinPlateSpline(
                        original, coord, vector, original.shape[3], device)
                    # Appearance view: color-jittered copy of the batch.
                    image_appearance_t = K.ColorJitter(arg.brightness,
                                                       arg.contrast,
                                                       arg.saturation,
                                                       arg.hue)(original)
                    image_spatial_t, image_appearance_t = normalize(
                        image_spatial_t), normalize(image_appearance_t)
                    reconstruction, loss, rec_loss, equiv_loss, mu, L_inv = model(
                        original, image_spatial_t, image_appearance_t, coord,
                        vector)
                    # Track the norms of the part means and precision matrices.
                    mu_norm = torch.mean(torch.norm(
                        mu, p=1, dim=2)).cpu().detach().numpy()
                    L_inv_norm = torch.mean(
                        torch.linalg.norm(L_inv, ord='fro',
                                          dim=[2, 3])).cpu().detach().numpy()
                    wandb.log({"Part Means": mu_norm})
                    wandb.log({"Precision Matrix": L_inv_norm})
                    # Zero out gradients
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                    # Track Loss
                    # Accumulate per-step losses and log the running epoch mean.
                    if step == 0:
                        loss_log = torch.tensor([loss])
                        rec_loss_log = torch.tensor([rec_loss])
                    else:
                        loss_log = torch.cat([loss_log, torch.tensor([loss])])
                        rec_loss_log = torch.cat(
                            [rec_loss_log,
                             torch.tensor([rec_loss])])
                    training_loss = torch.mean(loss_log)
                    training_rec_loss = torch.mean(rec_loss_log)
                    wandb.log({"Training Loss": training_loss})
                    wandb.log({"Training Rec Loss": training_rec_loss})
                print(f'Epoch: {epoch}, Train Loss: {training_loss}')

                # Evaluate on Test Set
                model.eval()
                for step, original in enumerate(test_loader):
                    with torch.no_grad():
                        original = original.to(device)
                        # Same two-view augmentation as in training.
                        tps_param_dic = tps_parameters(original.shape[0],
                                                       arg.scal, arg.tps_scal,
                                                       arg.rot_scal,
                                                       arg.off_scal,
                                                       arg.scal_var,
                                                       arg.augm_scal)
                        coord, vector = make_input_tps_param(tps_param_dic)
                        coord, vector = coord.to(device), vector.to(device)
                        image_spatial_t, _ = ThinPlateSpline(
                            original, coord, vector, original.shape[3], device)
                        image_appearance_t = K.ColorJitter(
                            arg.brightness, arg.contrast, arg.saturation,
                            arg.hue)(original)
                        image_spatial_t, image_appearance_t = normalize(
                            image_spatial_t), normalize(image_appearance_t)
                        reconstruction, loss, rec_loss, equiv_loss, mu, L_inv = model(
                            original, image_spatial_t, image_appearance_t,
                            coord, vector)
                        # NOTE: loss_log is re-initialized here, discarding
                        # the training-loop accumulator of the same name.
                        if step == 0:
                            loss_log = torch.tensor([loss])
                        else:
                            loss_log = torch.cat(
                                [loss_log, torch.tensor([loss])])
                evaluation_loss = torch.mean(loss_log)
                wandb.log({"Evaluation Loss": evaluation_loss})
                print(f'Epoch: {epoch}, Test Loss: {evaluation_loss}')

                # Track Progress
                # NOTE(review): 'if True' runs the visualization every epoch,
                # on the last test batch — presumably a placeholder for a
                # periodic condition.
                if True:
                    model.mode = 'predict'
                    original, fmap_shape, fmap_app, reconstruction = model(
                        original, image_spatial_t, image_appearance_t, coord,
                        vector)
                    make_visualization(original, reconstruction,
                                       image_spatial_t, image_appearance_t,
                                       fmap_shape, fmap_app, model_save_dir,
                                       epoch, device)
                    save_model(model, model_save_dir)

    elif mode == 'predict':
        # Make Directory for Predictions
        model_save_dir = '../results/' + name
        if not os.path.exists(model_save_dir + '/predictions'):
            os.makedirs(model_save_dir + '/predictions')
        # Load Model and Dataset
        model = Model(arg).to(device)
        model = load_model(model, model_save_dir).to(device)
        # NOTE(review): load_deep_fashion_dataset() returns (train, test)
        # above, but here data[-4:] is sliced directly — confirm this takes
        # the intended samples.
        data = load_deep_fashion_dataset()
        test_data = np.array(data[-4:])
        test_dataset = ImageDataset(test_data)
        test_loader = DataLoader(test_dataset, batch_size=bn)
        model.mode = 'predict'
        model.eval()
        # Predict on Dataset
        for step, original in enumerate(test_loader):
            with torch.no_grad():
                original = original.to(device)
                tps_param_dic = tps_parameters(original.shape[0], arg.scal,
                                               arg.tps_scal, arg.rot_scal,
                                               arg.off_scal, arg.scal_var,
                                               arg.augm_scal)
                coord, vector = make_input_tps_param(tps_param_dic)
                coord, vector = coord.to(device), vector.to(device)
                image_spatial_t, _ = ThinPlateSpline(original, coord, vector,
                                                     original.shape[3], device)
                image_appearance_t = K.ColorJitter(arg.brightness,
                                                   arg.contrast,
                                                   arg.saturation,
                                                   arg.hue)(original)
                image, reconstruction, mu, shape_stream_parts, heat_map = model(
                    original, image_spatial_t, image_appearance_t, coord,
                    vector)
Exemplo n.º 16
0
def main2(arg):
    # Get args
    bn = arg.bn
    mode = arg.mode
    name = arg.name
    load_from_ckpt = arg.load_from_ckpt
    lr = arg.lr
    weight_decay = arg.weight_decay
    epochs = arg.epochs
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    arg.device = device

    if mode == 'train':
        # Make new directory
        model_save_dir = name + "/training2"
        if not os.path.exists(model_save_dir):
            os.makedirs(model_save_dir)
            os.makedirs(model_save_dir + '/image')
            os.makedirs(model_save_dir + '/reconstruction')
            os.makedirs(model_save_dir + '/mu')
            os.makedirs(model_save_dir + '/parts')

        # Load Datasets
        train_data = load_images_from_folder()[:100]
        train_dataset = ImageDataset2(train_data, arg)
        test_data = load_images_from_folder()[-1000:]
        test_dataset = ImageDataset2(test_data, arg)

        # Prepare Dataloader & Instances
        train_loader = DataLoader(train_dataset, batch_size=bn, shuffle=True)
        test_loader = DataLoader(test_dataset, batch_size=bn)
        model = Model2(arg).to(device)
        if load_from_ckpt == True:
            model = load_model(model, model_save_dir).to(device)
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=lr,
                                     weight_decay=weight_decay)

        # Make Training
        for epoch in range(epochs):
            # Train on Train Set
            model.train()
            model.mode = 'train'
            for step, original in enumerate(train_loader):
                original = original.to(device, dtype=torch.float)
                optimizer.zero_grad()
                # plot_tensor(original[0])
                # plot_tensor(spat[0])
                # plot_tensor(app[0])
                # plot_tensor(original[1])
                # plot_tensor(spat[1])
                # plot_tensor(app[1])
                # print(coord, vec)
                prediction, loss = model(original)
                loss.backward()
                optimizer.step()
                if epoch % 2 == 0 and step == 0:
                    print(f'Epoch: {epoch}, Train Loss: {loss}')

            # Evaluate on Test Set
            model.eval()
            for step, original in enumerate(test_loader):
                with torch.no_grad():
                    original = original.to(device, dtype=torch.float)
                    prediction, loss = model(original)
                    if epoch % 2 == 0 and step == 0:
                        print(f'Epoch: {epoch}, Test Loss: {loss}')

            # Track Progress
            if epoch % 5 == 0:
                model.mode = 'predict'
                image, reconstruction, mu, shape_stream_parts, heat_map = model(
                    original)
                for i in range(len(image)):
                    save_image(
                        image[i], model_save_dir + '/image/' + str(i) + '_' +
                        str(epoch) + '.png')
                    save_image(
                        reconstruction[i], model_save_dir + '/image/' +
                        str(i) + '_' + str(epoch) + '.png')
                    #save_image(mu[i], model_save_dir + '/image/' + str(epoch) + '.png')
                    #save_image(shape_stream_parts[i], model_save_dir + '/image/' + str(epoch) + '.png')

            # Save the current Model
            if epoch % 50 == 0:
                save_model(model, model_save_dir)

    elif arg.mode == 'predict':
        model_save_dir = arg.name + "/prediction"
        if not os.path.exists(model_save_dir):
            os.makedirs(model_save_dir)