Esempio n. 1
0
    def run_experiment(self):
        """Run a 10-fold cross-validation experiment for ActAnticipationModel.

        Hyper-parameters come from the command line (``utils.parse_args``);
        per-fold accuracy is collected and the mean is logged at the end.
        """
        class param:
            pass

        args = param()
        parse = utils.parse_args()
        logging.basicConfig(level=logging.INFO)
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        dataset = ActicipateDataset()

        # model params
        # NOTE(review): the original wrapped each scalar in
        # np.random.choice([x], size=1)[0], which always returns x while
        # needlessly consuming global RNG state; plain assignment is
        # equivalent and clearer.
        input_size = 44
        output_size = 12
        hidden_dim = parse.hidden_dim
        n_layers = parse.n_layers
        rnn_dropout = 0.3
        fc_dropout = 0.5

        # train params
        lr = parse.lr
        batch = parse.batch_size
        sequence = parse.seq
        clip = parse.grad_clip
        max_clip_sequence = parse.trunc_seq

        epoch = parse.epoch
        args.hidden_dim = hidden_dim
        args.n_layers = n_layers
        args.rnn_dropout = rnn_dropout
        args.fc_dropout = fc_dropout
        args.epoch = epoch
        args.lr = lr
        args.batch_size = batch
        args.seq = sequence
        args.clip = clip

        folds = 10
        accuracy = np.zeros((folds))
        for k, (train_data,
                test_data) in enumerate(dataset.cross_validation(k=folds)):
            # NOTE(review): dropout is hard-coded to 0.5 here even though
            # rnn_dropout/fc_dropout are computed above and never used by the
            # constructor — confirm whether that is intentional.
            model = ActAnticipationModel(input_size,
                                         output_size,
                                         hidden_dim,
                                         n_layers,
                                         rnn_dropout=0.5,
                                         fc_dropout=0.5)
            model = model.to(device)
            model = self.train(args, model, train_data, max_clip_sequence,
                               device, logging)
            acc = self.predict(model, test_data, logging)
            accuracy[k] = acc

        logging.info("Cross val: {:.2f}%".format(accuracy.mean() * 100))
    def run_experiment(self):
        """Cross-validate the Bayesian anticipation model (BActAnticipationModel).

        Runs 10-fold CV; when the mean accuracy exceeds 95%, the collected
        per-fold test results and the last fold's weights are saved to disk.
        """
        class param:
            pass

        params = param()
        logging.basicConfig(level=logging.INFO)
        self.device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
        args = utils.parse_args()
        dataset = ActicipateDataset()

        # model params
        input_size = 32
        output_size = 12
        hidden_dim = args.hidden_dim
        n_layers = args.n_layers
        rnn_dropout = .5
        fc_dropout = .7

        # train params
        lr = args.lr
        batch = args.batch_size
        sequence = args.seq
        clip = args.grad_clip
        max_clip_sequence = args.trunc_seq

        epoch = args.epoch
        params.hidden_dim = args.hidden_dim
        params.n_layers = args.n_layers
        params.rnn_dropout = rnn_dropout
        params.fc_dropout = fc_dropout
        params.epoch = epoch
        params.lr = lr
        params.batch_size = batch
        params.seq = sequence
        params.clip = clip

        folds = 10
        accuracy = np.zeros((folds))
        test_results = []
        for k, (train_data, test_data) in enumerate(dataset.cross_validation(k=folds)):
            # NOTE(review): constructor dropout is hard-coded to 0.5/0.5 even
            # though rnn_dropout=.5 / fc_dropout=.7 are set above — confirm.
            model = BActAnticipationModel(args.data_type, input_size, output_size,
                                          hidden_dim, n_layers,
                                          rnn_dropout=0.5, fc_dropout=0.5)
            model = model.to(self.device)
            model = self.train(params, model, train_data, max_clip_sequence,
                               self.device, logging)
            acc = self.predict(model, test_data, test_results, logging)
            accuracy[k] = acc

        if accuracy.mean() > 0.95:
            # Fixed: "{:04d}" zero-pads the experiment id; the original
            # "{:4d}" space-padded, leaving literal blanks in the file names.
            experiment = "{:04d}".format(int(np.random.rand() * 10000))
            pickle.dump(test_results,
                        open("prediction_brnn_{}.pkl".format(experiment), "wb"),
                        protocol=2)
            logging.info("Cross val: {:.2f}%".format(accuracy.mean() * 100))
            torch.save(model.state_dict(),
                       "act_model_BBB_{}.pth".format(experiment))
    def find_parameters(self, id=0, file_id=""):
        """Random-search hyper-parameters for S2SActionAnticipation via 5-fold CV.

        Args:
            id: CUDA device index, used as ``cuda:{id}`` (default 0). The
                original formatted the *builtin* ``id``, producing an invalid
                device string whenever CUDA was available.
            file_id: suffix for the logger name and log file (default "").
                The original referenced an undefined ``file_id`` name.

        Logs every sampled configuration, then the best one found.
        """
        class param:
            pass

        # dedicated file logger so parallel searches do not interleave
        logger = logging.getLogger('simple_example{}'.format(file_id))
        logger.setLevel(logging.INFO)
        fh = logging.FileHandler('log_test{}.log'.format(file_id))
        fh.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s :: %(message)s')
        fh.setFormatter(formatter)
        logger.addHandler(fh)

        param_msg = "input_size = {},output_size = {},hidden_dim = {},n_layers = {},rnn_dropout = {},fc_dropout = {},epoch= {},lr = {},batch = {},sequence = {},clip = {}, max={}"

        device = torch.device('cuda:{}'.format(id) if torch.cuda.is_available() else 'cpu')
        logger.info("\n\n\nRunning in {}".format(device))

        dataset = ActicipateDataset()
        best_args = None
        best_acc = 0
        for i in range(20):
            # model params
            input_size = 64
            output_size = 12
            hidden_dim = np.random.choice(a=[128, 256, 512], size=1)[0]
            n_layers = np.random.choice(a=[2, 4], size=1)[0]
            rnn_dropout = np.random.choice(a=[.5], size=1)[0]
            fc_dropout = np.random.choice(a=[.5], size=1)[0]

            # train params
            epoch = 20
            lr = np.random.choice(a=[1e-3, 5e-3, 5e-4], size=1)[0]
            batch = np.random.choice(a=[16, 32, 64], size=1)[0]
            sequence = np.random.choice(a=[10, 16, 32], size=1)[0]
            clip = np.random.choice(a=[3, 5, 10], size=1)[0]
            max_clip_sequence = np.random.choice(a=[80, 100], size=1)[0]

            # Fixed: a fresh instance per trial. The original mutated one
            # shared object, so best_args aliased the *last* trial's config
            # instead of the best one.
            args = param()
            args.hidden_dim = hidden_dim
            args.n_layers = n_layers
            args.rnn_dropout = rnn_dropout
            args.fc_dropout = fc_dropout
            args.epoch = epoch
            args.lr = lr
            args.batch_size = batch
            args.seq = sequence
            args.clip = clip
            # recorded so the summary below can report it (was never set)
            args.max_clip_sequence = max_clip_sequence

            logger.info(param_msg.format(input_size, output_size, hidden_dim,
                                         n_layers, rnn_dropout, fc_dropout,
                                         epoch, lr, batch, sequence, clip,
                                         max_clip_sequence))
            model = S2SActionAnticipation(input_size, output_size, hidden_dim,
                                          n_layers, rnn_dropout=0.5,
                                          fc_dropout=0.5)
            accuracy = np.zeros((5))
            for k, (train_data, test_data) in enumerate(dataset.cross_validation(k=5)):
                model = model.to(device)
                model = self.train(args, model, train_data, max_clip_sequence,
                                   device, logger)
                acc = self.predict(model, test_data, logger)
                accuracy[k] = acc
                if acc < 0.9:
                    break  # abandon this configuration on a weak fold
            mean_acc = accuracy.mean()
            if mean_acc > best_acc:
                best_acc = mean_acc
                best_args = args

        logger.info("Best Acc: {}".format(best_acc))
        # Guard: if no trial ever beat best_acc == 0, best_args is still None.
        # Attribute names fixed: the original read .batch/.sequence/
        # .max_clip_sequence, none of which were ever set (AttributeError).
        if best_args is not None:
            logger.info(param_msg.format(
                input_size, output_size, best_args.hidden_dim,
                best_args.n_layers, best_args.rnn_dropout,
                best_args.fc_dropout, best_args.epoch, best_args.lr,
                best_args.batch_size, best_args.seq, best_args.clip,
                best_args.max_clip_sequence))
    # NOTE(review): orphaned fragment — `out`, `input` (shadowing the builtin),
    # and `epoch_loss` are not defined in this scope. This looks like a
    # prediction-debug loop pasted in from another script; confirm and remove.
    for o,i in zip(out,input):
       print([(j,k,j-k) for j,k in zip(i,o)])
    print('Predict loss:{:.6f}'.format( epoch_loss))
            

if __name__ == "__main__":

    # Train ContextModel — presumably an autoencoder, judging by the
    # commented-out checkpoint name below; confirm against ContextModel.
    # NOTE(review): this fragment appears truncated — the epoch loop at the
    # bottom ends right after resetting epoch_loss, with no training body.
    num_epochs = 200
    batch_size = 32
    learning_rate = 5e-3
    model = ContextModel().cuda()
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(
        model.parameters(), lr=learning_rate)
    # decay the learning rate by 2% every epoch
    scheduler = StepLR(optimizer, step_size=1, gamma=0.98)
    dataset = ActicipateDataset()
    data = dataset.data
    # prepend 2000 all-zero rows and drop the first two columns of the data
    data = np.concatenate((np.zeros((2000,44)),data[:,2:]), 0)
    size = len(data)
    indexes = list(range(size))
    random.shuffle(indexes)
    data = data[indexes]
    # 40/60 test/train split after shuffling
    test = data[:int(size*0.4)]
    train = data[int(size*0.4):]
    # model.load_state_dict(torch.load("./context_autoencoder.pth"))
    # model.cuda()
    # predict(model,test)
    # return
    for epoch in range(num_epochs):
        scheduler.step()
        epoch_loss = 0
    def random_search(self, id, file_id):
        """Random-search the gradient-clip value for the Bayesian LSTM model.

        Args:
            id: CUDA device index, used as ``cuda:{id}``.
            file_id: suffix for the logger name and log file.

        Every configuration is evaluated with 10-fold CV; as soon as one
        reaches mean accuracy above 98%, its predictions, params and weights
        are written to disk and the search stops.
        """
        class param:
            pass
        params = param()

        # dedicated file logger so parallel searches do not interleave
        logger = logging.getLogger('simple_example{}'.format(file_id))
        logger.setLevel(logging.INFO)
        fh = logging.FileHandler('log_test{}.log'.format(file_id))
        fh.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s :: %(message)s')
        fh.setFormatter(formatter)
        logger.addHandler(fh)

        param_msg = "input_size = {}, output_size = {}, hidden_dim = {}, n_layers = {}, std = {}, epoch= {},lr = {},batch = {},sequence = {},clip = {}, max={}"

        self.device = torch.device('cuda:{}'.format(id) if torch.cuda.is_available() else 'cpu')
        logger.info("\n\n\nRunning in {}".format(self.device))

        dataset = ActicipateDataset()
        for i in range(40):
            # model params — every choice list except clip has one element,
            # so only the gradient-clip value is actually being searched
            input_size = 32
            output_size = 12
            hidden_dim = np.random.choice(a=[64], size=1)[0]
            n_layers = np.random.choice(a=[2], size=1)[0]
            rnn_dropout = np.random.choice(a=[.5], size=1)[0]
            fc_dropout = np.random.choice(a=[.5], size=1)[0]

            # train params
            epoch = 1000
            lr = np.random.choice(a=[5e-3], size=1)[0]
            batch = np.random.choice(a=[32], size=1)[0]
            sequence = np.random.choice(a=[32], size=1)[0]
            clip = np.random.choice(a=[0.1, 0.2, 0.3, 0.4], size=1)[0]
            max_clip_sequence = np.random.choice(a=[128], size=1)[0]
            logstd = np.random.choice(a=[-2], size=1)[0]

            params.hidden_dim = hidden_dim
            params.n_layers = n_layers
            params.rnn_dropout = rnn_dropout
            params.fc_dropout = fc_dropout
            params.epoch = epoch
            params.lr = lr
            params.batch_size = batch
            params.seq = sequence
            params.clip = clip
            params.logstd = logstd
            params.max_clip_sequence = max_clip_sequence

            logger.info(param_msg.format(input_size, output_size, hidden_dim,
                                         n_layers, logstd, epoch, lr, batch,
                                         sequence, clip, max_clip_sequence))
            folds = 10
            accuracy = np.zeros((folds))
            test_results = []
            for k, (train_data, test_data) in enumerate(dataset.cross_validation(k=folds)):
                model = BActAnticipationModel("lstm", input_size, output_size,
                                              hidden_dim, n_layers,
                                              rnn_dropout=0.5, fc_dropout=0.5,
                                              logstd=logstd)
                model = model.to(self.device)
                model = self.train(params, model, train_data,
                                   max_clip_sequence, self.device, logger)
                acc = self.predict(model, test_data, test_results, logger)
                accuracy[k] = acc
                if acc < 0.95:
                    break  # abandon this configuration on a weak fold

            if accuracy.mean() > 0.98:
                # Fixed: "{:04d}" zero-pads the experiment id; the original
                # "{:4d}" space-padded, leaving blanks inside the file names.
                experiment = "{:04d}".format(int(np.random.rand() * 10000))
                pickle.dump(test_results,
                            open("prediction_brnn_{}.pkl".format(experiment), "wb"),
                            protocol=2)
                pickle.dump(params,
                            open("params_brnn_{}.pkl".format(experiment), "wb"),
                            protocol=2)
                logger.info("Cross val: {:.2f}%".format(accuracy.mean() * 100))
                torch.save(model.state_dict(),
                           "act_model_BBB_{}.pth".format(experiment))
                break
Esempio n. 6
0
    def run_experiment(self):
        """Sweep gradient-clip values for the BBB anticipation model.

        Starting from the command-line clip value, increases clip in 0.1
        steps; each setting is evaluated with 10-fold CV and the first one
        whose mean accuracy reaches 99% is persisted to disk, then the sweep
        stops.
        """
        class param:
            pass

        params = param()
        logging.basicConfig(
            filename='BBBlogs.log',
            level=logging.INFO,
            format='%(asctime)s.%(msecs)03d :: %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S',
        )
        self.device = torch.device(
            'cuda:2' if torch.cuda.is_available() else 'cpu')
        args = utils.parse_args()
        dataset = ActicipateDataset()

        # model params
        input_size = 32
        output_size = 12
        hidden_dim = args.hidden_dim
        n_layers = args.n_layers
        rnn_dropout = .5
        fc_dropout = .7

        # train params
        lr = args.lr
        batch = args.batch_size
        sequence = args.seq
        clip = args.grad_clip
        max_clip_sequence = args.trunc_seq
        # passed straight to BActAnticipationModel — presumably prior
        # std/mixture settings for the Bayesian layers; confirm there
        logstd1 = -2
        logstd2 = -4
        pi = 0.2

        epoch = args.epoch
        params.hidden_dim = args.hidden_dim
        params.n_layers = args.n_layers
        params.rnn_dropout = rnn_dropout
        params.fc_dropout = fc_dropout
        params.epoch = epoch
        params.lr = lr
        params.batch_size = batch
        params.seq = sequence
        params.clip = clip

        # resume the sweep from the command-line clip value, in 0.1 steps
        start = int(clip * 10)
        for i in range(start, 100):
            clip = i * 0.1
            params.clip = clip
            logging.info("************* CLIP = {} **************".format(
                params.clip))
            folds = 10
            accuracy = np.zeros((folds))
            test_results = []
            for k, (train_data,
                    test_data) in enumerate(dataset.cross_validation(k=folds)):
                # NOTE(review): dropout is hard-coded 0.5/0.5 although
                # rnn_dropout=.5 / fc_dropout=.7 are set above — confirm.
                model = BActAnticipationModel(args.data_type,
                                              input_size,
                                              output_size,
                                              hidden_dim,
                                              n_layers,
                                              logstd1=logstd1,
                                              logstd2=logstd2,
                                              pi=pi,
                                              rnn_dropout=0.5,
                                              fc_dropout=0.5)
                model = model.to(self.device)
                model = self.train(params, model, train_data,
                                   max_clip_sequence, self.device, logging)
                acc = self.predict(model, test_data, test_results, logging)
                accuracy[k] = acc
                if acc < 0.99:
                    break  # drop this clip value at the first weak fold

            if accuracy.mean() >= 0.99:
                # Fixed: "{:04d}" zero-pads the experiment id; the original
                # "{:4d}" space-padded, leaving blanks inside the file names.
                experiment = "{:04d}".format(int(np.random.rand() * 10000))
                pickle.dump(test_results,
                            open("prediction_BBB_{}.pkl".format(experiment),
                                 "wb"),
                            protocol=2)
                logging.info("Cross val: {:.2f}%".format(accuracy.mean() *
                                                         100))
                torch.save(model.state_dict(),
                           "act_model_BBB_{}.pth".format(experiment))
                break