Code Example #1
import os

import numpy as np
import torch
from sklearn.metrics import accuracy_score

import argparser as parser


def evaluate(model, data_loader, alpha):

    args = parser.arg_parse()

    ''' set model to evaluate mode '''
    model.eval()

    preds = []
    gts = []  # ground truth
    print('start evaluate')
    with torch.no_grad():  # no need to track gradients during evaluation
        for idx, (imgs, gt) in enumerate(data_loader):
            imgs = imgs.cuda()
            pred_class, pred_domain = model(imgs, alpha)

            # class prediction = index of the largest logit
            _, pred = torch.max(pred_class, dim=1)

            pred = pred.cpu().numpy().squeeze()
            gt = gt.numpy().squeeze()

            preds.append(pred)
            gts.append(gt)

    gts = np.concatenate(gts)
    preds = np.concatenate(preds)

    np.save(os.path.join(args.save_dir, 'preds.npy'), preds)
    return accuracy_score(gts, preds)
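For orientation, here is a minimal usage sketch for evaluate, assuming a DANN-style model whose forward pass takes (imgs, alpha) and returns class and domain logits, as in the snippet above. The model class models.DANN, the 'val' split and the checkpoint filename are illustrative assumptions, not part of the original code.

# Hypothetical usage of evaluate(); models.DANN, the 'val' split and the
# checkpoint name are illustrative assumptions.
import torch
from torch.utils.data import DataLoader

import argparser as parser
import data
import model_2 as models

args = parser.arg_parse()

val_set = data.SVHN(args, mode='val')                     # assumed validation split
val_loader = DataLoader(val_set, batch_size=64, shuffle=False)

model = models.DANN().cuda()                              # hypothetical model class
model.load_state_dict(torch.load('model_best.pth.tar'))   # hypothetical checkpoint

# in a typical DANN setup alpha only scales the gradient-reversal branch,
# so its value does not affect class predictions at evaluation time
acc = evaluate(model, val_loader, alpha=0)
print('validation accuracy: {:.4f}'.format(acc))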
Code Example #2
import subprocess

import pkg_resources

# arg_parse, post_hook and the launch/list/create/bootstrap/codeupdate/adhoc
# helpers used below are defined in the project's own modules.


def main():
    user = subprocess.check_output("whoami", shell=True).rstrip()
    args = arg_parse()

    if args['which'] == 'launch-ec2':
        post_hook(user, args['which'], args['env'], args['role'])
        r_code, r_text = launch_ec2(args['instance_type'], args['role'],
                                    args['ip'], args['env'], args['public'])
        if r_code == 200:
            print "Job Queued Successfully. JobID %s" % r_text
        else:
            print "Failed to Queue to the Job. Please try again later"
    elif args['which'] == "list_lc":
        r_code, r_text = list_lc(args['env'])
        print r_text
    elif args['which'] == "create_lc":
        r_code, r_text = create_lc(args['env'], args['instance_type'],
                                   args['role'], args['public'], args['ami'])
        if r_code == 200:
            print "Job Queued Successfully. JobID %s" % r_text
        else:
            print "Failed to Queue to the Job. Please try again later"
    elif args['which'] == 'bootstrap':
        post_hook(user, args['which'], args['env'], args['role'])
        r_code, r_text = bootstrap(args['host'], args['role'], args['env'])
        if r_code == 200:
            print "Job Queued Successfully. JobID %s" % r_text
        else:
            print "Failed to Queue to the Job. Please try again later"
    elif args['which'] == 'codeupdate':
        post_hook(user, args['which'], args['env'], args['tag'])
        r_code, r_text = codeupdate(args['tag'], args['env'])
        if r_code == 200:
            print "Job Queued Successfully. JobID %s" % r_text
        else:
            print "Failed to Queue to the Job. Please try again later"
    elif args['which'] == 'adhoc':
        r_code, r_text = adhoc(args['host'], args['module'], args['args'],
                               args['env'])
        print r_text
    elif args['which'] == 'adhocj':
        r_code, r_text = adhocj(args['host'], args['module'], args['args'],
                                args['env'])
        if r_code == 200:
            print "Job Queued Successfully. JobID %s" % r_text
        else:
            print "Failed to Queue to the Job. Please try again later"
    elif args['which'] == 'result':
        r_code, r_text = result(args['jid'], args['env'])
        print r_text
    elif args['which'] == 'inventory':
        r_code, r_text = ec2list(args['pattern'], args['env'])
        print r_text
    elif args['which'] == 'version':
        bootstrapper_ver = pkg_resources.require("bootstrapper")[0].version
        print "Bootstrapper CLI version %s" % bootstrapper_ver
    else:
        print "invalid command"
Code Example #3
def main():
    user = (subprocess.check_output("whoami", shell=True)).rstrip()
    args = arg_parse()

    if args['which'] == 'launch-ec2':
        post_hook(user, args['which'], args['env'], args['role'])
        r_code, r_text = launch_ec2(args['instance_type'], args['role'], args['ip'], args['env'], args['public'])
        if r_code == 200:
            print "Job Queued Successfully. JobID %s" % r_text
        else:
            print "Failed to Queue the Job. Please try again later"
    elif args['which'] == "list_lc":
        r_code, r_text = list_lc(args['env'])
        print r_text
    elif args['which'] == "create_lc":
        r_code, r_text = create_lc(args['env'], args['instance_type'], args['role'], args['public'], args['ami'])
        if r_code == 200:
            print "Job Queued Successfully. JobID %s" % r_text
        else:
            print "Failed to Queue the Job. Please try again later"
    elif args['which'] == 'bootstrap':
        post_hook(user, args['which'], args['env'], args['role'])
        r_code, r_text = bootstrap(args['host'], args['role'], args['env'])
        if r_code == 200:
            print "Job Queued Successfully. JobID %s" % r_text
        else:
            print "Failed to Queue the Job. Please try again later"
    elif args['which'] == 'codeupdate':
        post_hook(user, args['which'], args['env'], args['tag'])
        r_code, r_text = codeupdate(args['tag'], args['env'])
        if r_code == 200:
            print "Job Queued Successfully. JobID %s" % r_text
        else:
            print "Failed to Queue the Job. Please try again later"
    elif args['which'] == 'adhoc':
        r_code, r_text = adhoc(args['host'], args['module'], args['args'], args['env'])
        print r_text
    elif args['which'] == 'adhocj':
        r_code, r_text = adhocj(args['host'], args['module'], args['args'], args['env'])
        if r_code == 200:
            print "Job Queued Successfully. JobID %s" % r_text
        else:
            print "Failed to Queue the Job. Please try again later"
    elif args['which'] == 'result':
        r_code, r_text = result(args['jid'], args['env'])
        print r_text
    elif args['which'] == 'inventory':
        r_code, r_text = ec2list(args['pattern'], args['env'])
        print r_text
    elif args['which'] == 'version':
        bootstrapper_ver = pkg_resources.require("bootstrapper")[0].version
        print "Bootstrapper CLI version %s" % bootstrapper_ver
    else:
        print "invalid command"
Code Example #4
    def __init__(self, mode, train_status):

        # Load parsed arguments
        args = argparser.arg_parse()
        mean = argparser.mean
        std = argparser.std

        # Set directories
        self.mode = mode
        self.train_status = train_status
        self.test_info_dir = args.out_dir_info
        self.size = args.size

        # Distinguish whether the model is pre-trained or fine-tuned
        if train_status == 'pretrain':
            self.data_dir = args.data_dir_pre_train
        else:
            self.data_dir = args.data_dir_fine_tune

        # Set data directories for the selected mode
        if self.mode == "train":
            gt_dir = os.path.join(self.data_dir, "imgs")
            masked_imgs_dir = os.path.join(self.data_dir, "masked_imgs")
            masks_dir = os.path.join(self.data_dir, "mask")
        elif self.mode == "test" and self.train_status == 'TA':
            masked_imgs_dir = os.path.join(args.data_dir_test)
        elif self.mode == "test":
            masked_imgs_dir = os.path.join(args.data_dir_test, "test")
            gt_dir = os.path.join(args.data_dir_test, "test_gt")
        else:
            print("Invalid mode in dataloader")
            sys.exit()

        # Get paths to each image. Length = number of images in directory (e.g. 400 for training)
        if self.mode == "train":
            self.masks = sorted(glob.glob(os.path.join(masks_dir, '*.png')))
            self.masked_imgs = sorted(
                glob.glob(os.path.join(masked_imgs_dir, '*.jpg')))
        elif self.mode == "test":
            self.masked_imgs = sorted(
                glob.glob(os.path.join(masked_imgs_dir, '*masked.jpg')))
            self.masks = sorted(
                glob.glob(os.path.join(masked_imgs_dir, '*mask.jpg')))

        if train_status != 'TA':
            self.gt_imgs = sorted(glob.glob(os.path.join(gt_dir, '*.jpg')))

        # Define transform
        self.transformT = transforms.Compose([transforms.ToTensor()])
        self.transformN = transforms.Compose([transforms.Normalize(mean, std)])
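The class that owns this __init__ is not shown in the snippet; assuming it also implements __len__ and __getitem__ as a torch Dataset, it would typically be consumed as below. The class name InpaintDataset, the batch size and the order of items returned by __getitem__ are assumptions.

# Hypothetical usage; InpaintDataset and the __getitem__ return order are assumptions.
from torch.utils.data import DataLoader

train_set = InpaintDataset(mode='train', train_status='pretrain')
train_loader = DataLoader(train_set, batch_size=8, shuffle=True, num_workers=4)

for masked_img, mask, gt_img in train_loader:
    # forward pass and loss computation would go here
    pass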
Code Example #5
import os

import argparser as parser
import model_2 as models
import data
import test_source as test

import numpy as np
import torch

#from test import evaluate

def save_model(model, save_path):
    torch.save(model.state_dict(), save_path)


if __name__=='__main__':

    args = parser.arg_parse()
    
    data_mnist = data.Mnist(args, mode='train')
    data_SVHN = data.SVHN(args, mode='train')
    '''create directory to save trained model and other info'''
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    ''' setup GPU '''
    torch.cuda.set_device(args.gpu)

    ''' setup random seed '''
    np.random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
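The script stops after the seeding step. A rough sketch of how this setup is usually continued is shown below; the batch size, the model class models.DANN and the checkpoint filename are assumptions, and the training loop itself is left out.

    # Hypothetical continuation; batch size, models.DANN and the checkpoint
    # name are illustrative assumptions.
    from torch.utils.data import DataLoader

    source_loader = DataLoader(data_SVHN, batch_size=64, shuffle=True)
    target_loader = DataLoader(data_mnist, batch_size=64, shuffle=True)

    model = models.DANN().cuda()
    # ... training loop over source_loader / target_loader goes here ...
    save_model(model, os.path.join(args.save_dir, 'model_best.pth.tar'))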
Code Example #6
                save_model(model, os.path.join(args.model_dir_fine_tune,
                                               "Net_finetune_epoch{}.pth.tar".format(epoch + 1)))
                if epoch > 0:
                    remove_prev_model(os.path.join(args.model_dir_fine_tune,
                                                   "Net_finetune_epoch{}.pth.tar".format(epoch)))

    print("\n***** Fine-tuning FINISHED *****")


if __name__ == "__main__":

    # -------
    #  General Setup
    # -------

    # Read input arguments
    args = argparser.arg_parse()

    # Directory for miscellaneous outputs
    if not os.path.exists(args.output_dir):
        print("Created directory for outputs: {}".format(args.output_dir))
        os.makedirs(args.output_dir)

    # Set device
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

    # Set seeds for batch shuffling
    random.seed(1)
    torch.manual_seed(1)

    # -------
    # Model