Ejemplo n.º 1
0
def main():
  """Sweep (dataset, label-ratio, split-index) combinations and train a model per combo.

  Combinations whose result is already recorded in the proto result matrix are
  skipped.  A failure in one combination is reported and the sweep continues,
  instead of silently swallowing the error as the previous bare ``except:`` did.
  """
  args = get_parser()
  # create log directory; exist_ok avoids a race if another process creates it first
  if not os.path.exists(args.log_dir):
    os.makedirs(args.log_dir, exist_ok=True)

  assert torch.cuda.is_available(), 'You must have at least one GPU'

  # set random seed for reproducibility across numpy and torch (CPU + GPU)
  torch.backends.cudnn.benchmark = True
  np.random.seed(args.manual_seed)
  torch.manual_seed(args.manual_seed)
  torch.cuda.manual_seed(args.manual_seed)

  name_list = [
     'ECG200',
  ]
  ratio_number_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
  ind_number_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
  for name in name_list:
    for ratio_number_ind, ratio_number in enumerate(ratio_number_list):
      for ind_number in ind_number_list:
        # reload the result matrix every iteration so results finished by
        # other processes since the last check are also skipped
        result_matrix = load_proto_result(name, ratio_number_list, ind_number_list)
        if result_matrix[ratio_number_ind][ind_number] != 0:
          print(ratio_number, ind_number, 'already done result = ', result_matrix[ratio_number_ind][ind_number])
          continue
        try:
          proto_bag = [name, ratio_number, ind_number]

          logger = Logger(str(args.log_dir)+'/'+name+'/'+str(ratio_number)+'/'+str(ind_number), args.manual_seed)
          logger.print("args :\n{:}".format(args))
          # create dataloaders: episodic few-shot batches via FewShotSampler
          train_dataset    = timeSeriesDataset(args.dataset_root, 'train', name, ratio_number, ind_number)
          train_sampler    = FewShotSampler(train_dataset.label, args.num_support_tr + args.num_query_tr, args.iterations)
          train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=args.workers)

          test_dataset     = timeSeriesDataset(args.dataset_root, 'test', name, ratio_number, ind_number)
          test_sampler     = FewShotSampler(test_dataset.label, args.num_support_tr + args.num_query_tr, 600)
          test_dataloader  = torch.utils.data.DataLoader(test_dataset, batch_sampler=test_sampler, num_workers=args.workers)

          # create model; input dimension comes from the dataset features
          model = models.__dict__[args.arch](train_dataset.fea_dim, 256, 64)
          model = torch.nn.DataParallel(model).cuda()
          logger.print("model:::\n{:}".format(model))

          criterion = nn.CrossEntropyLoss().cuda()
          params = list(model.parameters())
          optimizer    = torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
          lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer, gamma=args.lr_gamma, step_size=args.lr_step)
          info_path, test_acc1_avg = run(args, model, logger, criterion, optimizer, lr_scheduler, train_dataloader, test_dataloader, proto_bag)
          logger.print('save into {:}'.format(info_path))
          sentence = 'dataset_name=\t'+name+'\t'+'ratio=\t'+str(ratio_number)+'\t'+'ind=\t'+str(ind_number)+'\t'+'test_acc=\t'+str(test_acc1_avg)
          save_to_file(sentence, name)
        except Exception as exc:
          # was a bare `except:` that printed only ratio_number; report the
          # failing combination with the actual error, then continue the sweep
          print('FAILED', name, ratio_number, ind_number, ':', repr(exc))
Ejemplo n.º 2
0
def get_proto(args, model, proto_bag):
    """Compute class prototypes (per-class mean embeddings) over the train split.

    ``proto_bag`` is ``[dataset_name, ratio_number, ind_number]``.  Returns a
    stacked tensor with one prototype row per class, ordered by ascending label.
    """
    dataset_name, ratio_number, ind_number = proto_bag

    dataset = timeSeriesDataset(args.dataset_root, 'train', dataset_name, ratio_number, ind_number)
    embeddings = model(dataset.feature)

    # bucket sample indices by their class label
    indices_by_label = defaultdict(list)
    for sample_idx, label in enumerate(dataset.label.cpu().tolist()):
        indices_by_label[label].append(sample_idx)

    # one prototype per class: mean embedding of that class's samples,
    # iterated in sorted-label order so row order matches label order
    prototypes = [
        torch.mean(embeddings[torch.LongTensor(indices_by_label[label])], dim=0)
        for label in sorted(indices_by_label)
    ]
    return torch.stack(prototypes, dim=0)
Ejemplo n.º 3
0
def main():
  """Train once per dataset in the UCR-style name list (ratio=1, split index 10)."""
  args = get_parser()
  # NOTE(review): slicing the last 18 chars off cwd to reach the repo root is
  # fragile — it assumes a directory name of a specific length; confirm before
  # running from any other location.
  args.dataset_root = os.getcwd()[:-18]+'/SFA_Python-master/test/BOSS_feature_Data_pyts/'
  print(args.dataset_root)

  # make sure the log directory exists before any Logger is constructed
  if not os.path.exists(args.log_dir):
    os.makedirs(args.log_dir)

  assert torch.cuda.is_available(), 'You must have at least one GPU'

  # seed numpy and torch (CPU + GPU) for reproducibility
  torch.backends.cudnn.benchmark = True
  np.random.seed(args.manual_seed)
  torch.manual_seed(args.manual_seed)
  torch.cuda.manual_seed(args.manual_seed)

  name_list = [
    'ArrowHead',
    'BME',
    'CBF',
    'Chinatown',
    'ECG200',
    'GunPoint',
    'GunPointAgeSpan',
    'GunPointOldVersusYoung',
    'ItalyPowerDemand',
    'MoteStrain',
    'Plane',
    'SonyAIBORobotSurface1',
    'SonyAIBORobotSurface2',
    'SyntheticControl',
    'ToeSegmentation1',
    'TwoLeadECG',
    'UMD',
    'Wine',
  ]
  ratio_number_list = [1]
  ind_number_list = [10]
  for name in name_list:
    for ratio_number in ratio_number_list:
      for ind_number in ind_number_list:
        proto_bag = [name, ratio_number, ind_number]

        logger = Logger(str(args.log_dir)+'/'+name+'/'+str(ratio_number)+'/'+str(ind_number), args.manual_seed)
        logger.print("args :\n{:}".format(args))

        # build episodic dataloaders for the train and test splits
        train_dataset = timeSeriesDataset(args.dataset_root, 'train', name, ratio_number, ind_number)
        train_sampler = FewShotSampler(train_dataset.label, args.num_support_tr + args.num_query_tr, args.iterations)
        train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=args.workers)

        test_dataset = timeSeriesDataset(args.dataset_root, 'test', name, ratio_number, ind_number)
        test_sampler = FewShotSampler(test_dataset.label, args.num_support_tr + args.num_query_tr, 600)
        test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_sampler=test_sampler, num_workers=args.workers)

        # instantiate the network; feature dimension is taken from the dataset
        model = models.__dict__[args.arch](train_dataset.fea_dim, 256, 64)
        model = torch.nn.DataParallel(model).cuda()
        logger.print("model:::\n{:}".format(model))

        # loss, optimizer and step-decay schedule
        criterion = nn.CrossEntropyLoss().cuda()
        params = [p for p in model.parameters()]
        optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer, gamma=args.lr_gamma, step_size=args.lr_step)

        # run the training/evaluation loop and persist the summary line
        info_path, test_acc1_avg = run(args, model, logger, criterion, optimizer, lr_scheduler, train_dataloader, test_dataloader, proto_bag)
        logger.print('save into {:}'.format(info_path))
        sentence = 'dataset_name=\t'+name+'\t'+'ratio=\t'+str(ratio_number)+'\t'+'ind=\t'+str(ind_number)+'\t'+'test_acc=\t'+str(test_acc1_avg)
        save_to_file(sentence, name)
Ejemplo n.º 4
0
def load_SFA_train_data(name):
    """Load the BOSS-feature training split for *name* (ratio 1, split index 10)."""
    root = '/home/tangw/Desktop/TSC/BOSS_NN/SFA_Python-master/test/BOSS_feature_Data_pyts/'
    return timeSeriesDataset(root, 'train', name, 1, 10)
Ejemplo n.º 5
0
name_list = [
    'ECG200',
]
ratio_number_list = [1]
ind_number_list = [10]
args_arch = 'linear_transform'
args_dataset_root = '/home/tangw/Desktop/TSC/BOSS_NN/SFA_Python-master/test/BOSS_feature_Data_pyts/'
using_proto = True
pick_all = True

for name in name_list:
    for ind_radio, ratio_number in enumerate(ratio_number_list):
        for ind, ind_number in enumerate(ind_number_list):
            model_lst_path = get_log_path(name, ratio_number, ind_number)

            train_dataset = timeSeriesDataset(args_dataset_root, 'train', name,
                                              ratio_number, ind_number)
            model = models.__dict__[args_arch](train_dataset.fea_dim, 256, 64)
            model = torch.nn.DataParallel(model).cuda()

            checkpoint = torch.load(model_lst_path)
            start_epoch = checkpoint['epoch'] + 1
            best_acc = checkpoint['best_acc']
            model.load_state_dict(checkpoint['model_state_dict'])
            model.eval()

            feas = train_dataset.feature
            labels = train_dataset.label
            embs = model(feas)

            cpu_labels = labels.cpu().tolist()
            idxs_dict = defaultdict(list)