Example #1
def test_wide_deep():
    utils.config_logging('log_{}.log'.format('WideEstimator'))
    n_epoch = 10
    batch_size = 32
    data_source = DataSource('./dataset/train.csv', './dataset/test.csv', batch_size)
    wide_hparams = WideHParams(field_names=CATEGORY_FIELDS,
                               alpha=0.1,
                               beta=1,
                               L1=0.1,
                               L2=0.1)
    deep_hparams = get_deep_hparams(embed_size=16,
                                    hidden_units=[64, 16],
                                    L2=0.01,
                                    learning_rate=0.001)
    start_time = time.time()
    estimator = WideDeepEstimator(wide_hparams=wide_hparams, deep_hparams=deep_hparams, data_source=data_source)
    metrics_history = estimator.train(n_epoch)
    elapsed = time.time() - start_time

    # ************ display result
    logging.info("\n************** TIME COST **************")
    logging.info('{:.2f} seconds for {} epochs'.format(elapsed, n_epoch))
    logging.info('{:.2f} examples per second'.format(
        n_epoch * (data_source.n_train_examples + data_source.n_test_examples) / elapsed))

    logging.info("\n************** LEARNING CURVE **************")
    metrics_history = pd.DataFrame(metrics_history)
    logging.info(metrics_history)
    metrics_history.to_csv('learn_curve_{}.csv'.format(estimator), index=False)
Example #2
def main(args: Namespace):
    config_logging(args)
    logger.debug(args)

    if args.assume_supported:
        logger.info("Skipping /etc/lsb-release check")
    else:
        if not check_ubuntu():
            logger.error("Unsupported system")
            return 1

    for item in load_config(args):
        logger.debug(item)
        requirements = item.get("requirements")
        if requirements is None:
            logger.warning("Skipping {} as it has no 'requirements'",
                           item["dir"])
            continue
        if isinstance(requirements, list):
            if requirements:
                print("apt-get install -qq", " ".join(requirements))
        else:
            for k, v in requirements.items():
                if k == "build-dep":
                    print("apt-get build-dep -qq", " ".join(v))
                elif k == "packages":
                    print("apt-get install -qq", " ".join(v))
                else:
                    logger.warning("Unknown key '{}' in requirements", k)

    return 0
Example #3
def main(project_dir, var_matrix_path, log, sep):
    config_logging(log, "INFO")
    LOG.info("Starting Analysis")
    sep = {'comma': ',', 'space': ' ', 'tab': '\t'}[sep]
    pattern_json = json.loads(
        open(
            get_file_from_project(project_dir,
                                  "./minimum_spanning_set/amplicons_*.json"),
            'r').read())
    best_loci = remove_extra_loci(pattern_json)
    haplotype_df = pd.read_csv(get_file_from_project(
        project_dir, "./minimum_spanning_sehaplotype_*.csv"),
                               index_col=0)
    pattern_order = list(haplotype_df.columns)
    write_dataframe_to_file(
        get_haplotype_matrix(pattern_order, best_loci, var_matrix_path, sep),
        os.path.join(project_dir,
                     "minimum_spanning_set/haplotypes_matrix.csv"))
    scores, patterns = get_resolution(haplotype_df)
    write_dataframe_to_file(get_summary_data(best_loci, scores, pattern_order),
                            os.path.join(
                                project_dir,
                                "minimum_spanning_set/amplicon_matrix.csv"),
                            index=False)
    write_dataframe_to_file(
        get_resolution_matrix(haplotype_df.index, pattern_order, patterns),
        os.path.join(project_dir, "minimum_spanning_set/pattern_matrix.csv"))
Example #4
def main(spike_events_path, spike_events_namespace, populations,
         spike_hist_bin, t_variable, t_max, t_min, lag, max_cells, graph_type,
         font_size, verbose):

    utils.config_logging(verbose)

    if t_max is None:
        time_range = None
    else:
        if t_min is None:
            time_range = [0.0, t_max]
        else:
            time_range = [t_min, t_max]

    if not populations:
        populations = ['eachPop']

    if graph_type is None:
        graph_type = 'matrix'

    plot.plot_spike_histogram_autocorr(spike_events_path,
                                       spike_events_namespace,
                                       include=populations,
                                       time_range=time_range,
                                       time_variable=t_variable,
                                       lag=lag,
                                       bin_size=spike_hist_bin,
                                       maxCells=max_cells,
                                       graph_type=graph_type,
                                       fontSize=font_size,
                                       saveFig=True)
Example #5
def AcornServerComms(stop_signal, acorn_pipe, server_endpoint, logging):
    logger = logging.getLogger('main.comms')
    utils.config_logging(logger, debug=False)

    REQUEST_TIMEOUT = 4500
    REQUEST_RETRIES = 30

    logger.info("Connecting to server at {} …".format(server_endpoint))
    context = zmq.Context()
    client = context.socket(zmq.REQ)
    client.connect(server_endpoint)

    for sequence in itertools.count():
        if stop_signal.is_set():
            break
        if not acorn_pipe.poll(timeout=_POLL_TIMEOUT_SEC):
            logger.error("No data from master.")
            continue

        request = acorn_pipe.recv()
        seq_string = str(sequence).encode()
        logger.debug("Sending (%s)", seq_string)
        request.insert(0, seq_string)
        client.send_multipart(request)

        # time.sleep(0.5)

        retries_left = REQUEST_RETRIES
        while not stop_signal.is_set() and retries_left > 0:
            if (client.poll(REQUEST_TIMEOUT) & zmq.POLLIN) != 0:
                reply = client.recv_multipart()
                # print(reply)

                if int(reply[0]) == sequence:
                    logger.debug("Server replied OK (%s...)", reply[:2])
                    acorn_pipe.send(reply[1:])
                    break
                else:
                    logger.error("Malformed reply from server: %s", reply)
                    continue

            retries_left -= 1
            logger.warning("No response from server")
            # Socket is confused. Close and remove it.
            client.setsockopt(zmq.LINGER, 0)
            client.close()
            logger.info("Reconnecting to server…")
            # Create new connection
            client = context.socket(zmq.REQ)
            client.connect(server_endpoint)
            logger.info("Resending (%s)", seq_string)
            client.send_multipart(request)
Example #6
def main(args: Namespace) -> int:
    config_logging(args)
    logger.debug(args)

    os.chdir(args.build_dir)
    if args.bc_list is None:
        args.bc_list = rglob(".")
        args.bc_list = list(filter_bc_list(args))

    for step in args.steps:
        ret = STEP_MAP[step]["cb"](args)
        if ret > 0 and args.exit_on_error:
            return ret

    return 0
Example #7
    def __init__(self, ec2_manager, ec2_instance_id, ec2_instance_dns):
        '''
        Since this is run on the main process, it shouldn't
        open connections or file descriptors.
        '''
        # Zeno task manager
        self.tasks = zeno.TasksManager("tasks",
                                       host=config.DB_HOST,
                                       user=config.DB_USER,
                                       passwd=config.DB_PASSWD)

        # Database connection
        self.db = MyMySQL(db=config.DB_NAME,
                          host=config.DB_HOST,
                          user=config.DB_USER,
                          passwd=config.DB_PASSWD)

        # EC2 manager to issue commands.
        self.ec2_manager = ec2_manager

        # EC2 instance information to be used as a proxy.
        self.ec2_instance_id = ec2_instance_id
        self.ec2_instance_dns = ec2_instance_dns

        # Logging configuration
        self.log = utils.config_logging(
            'downloader',
            stream=sys.stdout,
            level=logging.DEBUG,
            format='%(asctime)s (%(name)s) [%(levelname)6s]: %(message)s',
            datefmt="%Y-%m-%d %H:%M:%S")
Example #8
    def __init__(self):

        # Zeno task manager
        self.tasks = zeno.TasksManager("tasks",
                                       host=config.DB_HOST,
                                       user=config.DB_USER,
                                       passwd=config.DB_PASSWD)

        # Database connection
        self.db = MyMySQL(db=config.DB_NAME,
                          host=config.DB_HOST,
                          user=config.DB_USER,
                          passwd=config.DB_PASSWD)

        # Logging configuration
        self.log = utils.config_logging(
            'tokenizer',
            stream=sys.stdout,
            level=logging.DEBUG,
            format='%(asctime)s (%(name)s) [%(levelname)6s]: %(message)s',
            datefmt="%Y-%m-%d %H:%M:%S")

        self.MIN_TOKENS = 10

        # Create folders if they do not exist
        utils.ensure_folder(os.path.dirname(config.TOKENS_PATH))
        utils.ensure_folder(os.path.dirname(config.TOKENS_PATH_PARTS))
Example #9
def main(args: Namespace) -> int:
    config_logging(args)
    logger.debug(args)

    args.items = load_config(args)
    logger.debug("Got items: {}", args.items)
    if not args.items:
        logger.info("Nothing to do!")
        return 0

    if args.build_dir:
        os.makedirs(args.build_dir, exist_ok=True)
        os.chdir(args.build_dir)

    if args.clone:
        if not clone_all(args.items):
            return 1
        if args.clone_only:
            return 0

    successful = list(build_iter(args))
    if successful:
        # Items that succeeded this time should always be in done.log
        successful_urls = set(d["url"] for d in successful)
        # Those that failed should always be removed from done.log
        failed = set(d["url"] for d in args.items) - successful_urls
        # All others should be preserved
        successful_urls.update(set(get_done("done.log")))
        # Order of operations matters here: First add old and new successes and then remove new failures
        successful_urls.difference_update(failed)
        with open("done.log", "w") as f:
            print("\n".join(sorted(successful_urls)), file=f)

    failures = len(args.items) - len(successful)
    if failures > 0 and args.exit_on_error:
        return 1
    return failures
Example #10
def main():
    global PROGRAM_CONFIG

    # Load configuration
    config = load_config()
    if config:
        PROGRAM_CONFIG = config

    # Determine the log level
    if PROGRAM_CONFIG['log_level'] == 'DEBUG':
        config_logging(logging.DEBUG, PROGRAM_CONFIG['log_name'])
    elif PROGRAM_CONFIG['log_level'] == 'INFO':
        config_logging(logging.INFO, PROGRAM_CONFIG['log_name'])
    elif PROGRAM_CONFIG['log_level'] == 'ERROR':
        config_logging(logging.ERROR, PROGRAM_CONFIG['log_name'])
    elif PROGRAM_CONFIG['log_level'] == 'WARN':
        config_logging(logging.WARN, PROGRAM_CONFIG['log_name'])
    else:
        config_logging()

    server = ProxyServer(PROGRAM_CONFIG['listen_port'])

    # Thread that checks whether clients are still online
    check_thread = Thread(target=check_alive_thread)

    try:
        print('Starting the thread that checks client online status')
        check_thread.start()

        server.start()
    except Exception as err:
        LOG.error(str(err))
    finally:
        print('Writing config info')
        save_config(PROGRAM_CONFIG)
        print('Exiting program')
Example #11
def main():
    global PROGRAM_CONFIG

    # Load configuration
    config = load_config()
    if config:
        PROGRAM_CONFIG = config

    # Determine the log level
    if PROGRAM_CONFIG['log_level'] == 'DEBUG':
        config_logging(logging.DEBUG, PROGRAM_CONFIG['log_name'])
    elif PROGRAM_CONFIG['log_level'] == 'INFO':
        config_logging(logging.INFO, PROGRAM_CONFIG['log_name'])
    elif PROGRAM_CONFIG['log_level'] == 'ERROR':
        config_logging(logging.ERROR, PROGRAM_CONFIG['log_name'])
    elif PROGRAM_CONFIG['log_level'] == 'WARN':
        config_logging(logging.WARN, PROGRAM_CONFIG['log_name'])
    else:
        config_logging()

    server = ProxyServer(PROGRAM_CONFIG['listen_port'])

    # Thread that checks whether clients are still online
    check_thread = Thread(target=check_alive_thread)

    try:
        print('Starting the thread that checks client online status')
        check_thread.start()

        server.start()
    except Exception as err:
        LOG.error(str(err))
    finally:
        print('Writing config info')
        save_config(PROGRAM_CONFIG)
        print('Exiting program')
Example #12
def main():
    global PROGRAM_CONFIG

    # Load configuration
    config = load_config()
    if config:
        PROGRAM_CONFIG = config

    # Determine the log level
    if PROGRAM_CONFIG['log_level'] == 'DEBUG':
        config_logging(logging.DEBUG, PROGRAM_CONFIG['log_name'])
    elif PROGRAM_CONFIG['log_level'] == 'INFO':
        config_logging(logging.INFO, PROGRAM_CONFIG['log_name'])
    elif PROGRAM_CONFIG['log_level'] == 'ERROR':
        config_logging(logging.ERROR, PROGRAM_CONFIG['log_name'])
    elif PROGRAM_CONFIG['log_level'] == 'WARN':
        config_logging(logging.WARN, PROGRAM_CONFIG['log_name'])
    else:
        config_logging()

    server = ProxyServer(PROGRAM_CONFIG['listen_port'])

    # Thread that checks whether clients are still online
    check_thread = Thread(target=check_alive_thread)

    try:
        print('Starting the thread that checks client online status')
        check_thread.start()

        server.start()
    except Exception as err:
        LOG.error(str(err))
    finally:
        print('Writing config info')
        save_config(PROGRAM_CONFIG)
        print('Exiting program')
Example #13
    def __init__(self):
        ''' Converter constructor.'''

        # Zeno task manager
        self.tasks = zeno.TasksManager("tasks",
                                       host=config.DB_HOST,
                                       user=config.DB_USER,
                                       passwd=config.DB_PASSWD)

        # Database connection
        self.db = MyMySQL(db=config.DB_NAME,
                          host=config.DB_HOST,
                          user=config.DB_USER,
                          passwd=config.DB_PASSWD)

        # Logging configuration
        self.log = utils.config_logging(
            'converter',
            stream=sys.stdout,
            level=logging.DEBUG,
            format='%(asctime)s (%(name)s) [%(levelname)6s]: %(message)s',
            datefmt="%Y-%m-%d %H:%M:%S")
Example #14
def wifi_process(stop_signal, master_conn, logging, debug):
    logger = logging.getLogger('main.wifi')
    utils.config_logging(logger, debug)
    interfaces = netifaces.interfaces()
    if "wlan0" in interfaces and "wlan1" in interfaces:
        wlan0_ip = None
        wlan1_ip = None
        try:
            # Raises if wlan0 has no ipv4 ip.
            wlan0_ip = netifaces.ifaddresses('wlan0')[2][0]['addr']
        except KeyError:
            pass
        try:
            # Raises if wlan1 has no ipv4 ip.
            wlan1_ip = netifaces.ifaddresses('wlan1')[2][0]['addr']
        except KeyError:
            pass
        if wlan0_ip and wlan1_ip:
            logger.info(
                "Found IP addresses for wlan0 and wlan1 so turning off wlan0.")
            logger.info("wlan1 IP is {}".format(wlan1_ip))
            subprocess.check_call("ifconfig wlan0 down", shell=True)
            logger.info("Turned off wlan0.")
        else:
            logger.info(
                "Two wifi adapters present but not both are connected, "
                "so wlan0 was not turned off.")
            logger.info("wlan0 IP: {}  | wlan1 IP: {}".format(
                wlan0_ip, wlan1_ip))
        log_counter = 0
        while not stop_signal.is_set():
            linkdata = subprocess.check_output("iw dev wlan1 link", shell=True)
            linkdata = linkdata.splitlines()
            tempdata = subprocess.check_output(
                "/opt/vc/bin/vcgencmd measure_temp", shell=True)
            try:
                # Note: My deepest apologies for not using regex here. - TLA
                signal = str(linkdata[5]).split(':')[1].split(' ')[1]
                station_mac = str(linkdata[0]).split()[2]
                if station_mac in access_points.keys():
                    station_name = access_points[station_mac]
                else:
                    station_name = station_mac

                temp = float(str(tempdata).split('=')[1].split('\'')[0])
                # print("Temp = {} C".format(temp))

                if master_conn:
                    master_conn.send((signal, station_name, temp))
                    log_counter += 1
                    if log_counter >= _LOG_SKIP_COUNT:
                        logger.info(
                            "Wifi RSSI: {} dBm, Station: {}, CPU Temp {}".
                            format(signal, station_name, temp))
                        log_counter = 0
                else:
                    logger.info(station_name)
                    logger.info("Wifi RSSI: {} dBm".format(signal))
            except Exception as e:
                print(e)
                # raise e
            time.sleep(0.5)
    else:
        logger.info(
            "Did not find two wifi adapters so wlan0 will not be disabled.")
Example #15
                        default=1,
                        help="Number of threads for multiprocessing")

    ARGS = PARSER.parse_args()

    # Make project directory instance
    # Set up project tree
    # Project_Name_Y_M_D_H_M_S
    # ---- patterns
    # ---- history
    # ---- logs
    PROJECT_DIR = Project_Directory(ARGS.project_dir, ARGS.project_name,
                                    ["patterns", "logs", "history", "flags"])

    config_logging(
        os.path.join(PROJECT_DIR.get_sub_directory("logs"),
                     "{0}_preprocessing.log".format(PROJECT_DIR.project_name)),
        ARGS.log)

    SEP = {'comma': ',', 'space': ' ', 'tab': '\t'}[ARGS.sep]

    PARAMS = {
        "strict": ARGS.strict,
        "window": ARGS.window,
        "strain_cutoff": ARGS.strain_cutoff,
        "pz_size": ARGS.pz_size,
        "pz_filter_percent": ARGS.pz_filter_percent,
        "pz_filter_length": ARGS.pz_filter_length,
        "exclude_strains": ARGS.exclude_strains,
        "sep": SEP,
        "n_threads": ARGS.threads
    }
Example #16
    def __init__(self, simulation, debug):
        self.simulation = simulation
        self.debug = debug
        self.logger = logging.getLogger('main')
        config_logging(self.logger, self.debug)
Example #17
import sys
import logging
import torch
import torchvision
import numpy as np
import torch.nn as nn
from sklearn import metrics
from torch.utils.data import DataLoader, Dataset
from polyvore_dataset import TripletDataset, CategoryDataset
from evaluate import test_compatibility_auc, test_fitb_quesitons
from utils import AverageMeter, prepare_dataloaders, config_logging
from model import CompatModel

# Comment for this training run; it is used as the name suffix for the log and the saved model
comment = '_'.join(sys.argv[1:])

# Logger
config_logging(comment)

# Hyperparameters
img_size = 112
emb_size = 64
device = torch.device("cuda")

# Dataloader
transform = torchvision.transforms.Compose([
    torchvision.transforms.Scale((img_size, img_size)),
    torchvision.transforms.CenterCrop(112),
    torchvision.transforms.RandomHorizontalFlip(),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225]),
])
Example #18
                    type=int,
                    default=30,
                    help='size of latent vector z')
# --- netI params
parser.add_argument('--archI', default='FC_selu')
parser.add_argument('--netI', default=None)
parser.add_argument('--hidden_layer_size', type=int, default=512)
parser.add_argument('--num_extra_layers', type=int, default=2)
parser.add_argument('--nw',
                    type=int,
                    default=30,
                    help='size of latent vector w')
args = parser.parse_args()

os.system('mkdir -p {0}'.format(args.outdir))
utils.config_logging(logging, args.outdir)
utils.seedme(args.seed)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

netG = getattr(models, args.archG)(image_size=args.image_size,
                                   nz=args.nz,
                                   image_depth=args.image_depth,
                                   num_filters=args.num_filters).to(device)
netG.load_state_dict(torch.load(args.netG))
for p in netG.parameters():
    p.requires_grad_(False)
netG.eval()
print(netG)

netI = getattr(models,
               args.archI)(input_size=args.nw,
Example #19
                       vocab_infos=vocab_infos,
                       embed_fields=embed_fields,
                       hidden_units=hidden_units,
                       L2=L2,
                       optimizer=optimizer)


if __name__ == "__main__":
    # ************ define command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', "--estimator")
    parser.add_argument('-n', "--n_epoches", type=int, default=10)
    args = parser.parse_args()

    # ************ prepare
    utils.config_logging('log_{}.log'.format(args.estimator))

    data_source = DataSource(batch_size=32)

    deep_hparams = get_deep_hparams(embed_size=16,
                                    hidden_units=[64, 16],
                                    L2=0.01,
                                    learning_rate=0.001)

    wide_hparams = WideHParams(field_names=CATEGORY_FIELDS,
                               alpha=0.1,
                               beta=1,
                               L1=0.1,
                               L2=0.1)

    # ************ run
Example #20
TRAIN_FILE = os.path.join(DATA_DIR, 'train.data')
TEST_FILE = os.path.join(DATA_DIR, 'test.data')


def main():
    input_file = TRAIN_FILE
    out_file = os.path.join(RESULT_DIR, MODEL_NAME)
    classifier = ft.supervised(input_file,
                               out_file,
                               dim=20,
                               lr=0.25,
                               word_ngrams=2,
                               min_count=20,
                               bucket=1000000,
                               epoch=5,
                               thread=10,
                               silent=0,
                               label_prefix="__label__")

    # test the classifier
    result = classifier.test(TEST_FILE)

    logger.info('P@1: %s', result.precision)
    logger.info('R@1: %s', result.recall)
    logger.info('Number of examples: %s', result.nexamples)


if __name__ == '__main__':
    utils.config_logging()
    main()
Example #21
NORTH_ALIGNED_4_METER_GRID = 2

GRID_VERSION = NORTH_ALIGNED_4_METER_GRID

USE_DATABASE = False

if USE_DATABASE:
    import redis
    import pickle
    r = redis.Redis(host='192.168.1.170', port=6379)

STATIONARY_TIME = 30

logger = logging.getLogger('rtk_triangles')

config_logging(logger)

tcp_sock1 = None

if not USE_DATABASE:
    rtk_process.launch_rtk_sub_procs(single=True, logger=logger)
    tcp_sock1, tcp_sock2 = rtk_process.connect_rtk_procs(single=True,
                                                         logger=logger)
    # raise Exception()
print_gps_counter = 0
latest_sample = None
position_list = []
last_bad_status_time = 0
bad_tone_time = 0
good_tone_time = 0
save_file_okay = True
Example #22
    EXC_STRAINS.add_argument("--excl_strains_file",
                             type=file_type,
                             default=None,
                             help="Path to file containing list of strains "
                             "(in a single column) that should be excluded")

    ARGS = PARSER.parse_args()

    PROJECT_DIR = Project_Directory(
        ARGS.project_dir, ARGS.project_name,
        ["summary", "logs", "history", "minimum_spanning_set"],
        ["patterns", "flags", "history"])

    config_logging(
        os.path.join(
            PROJECT_DIR.get_sub_directory("logs"),
            "{0}_pattern_selection.log".format(PROJECT_DIR.project_name)),
        ARGS.log)

    LOGGER = logging.getLogger(__name__)

    try:
        if ARGS.req_loci_file is not None:
            ARGS.required_loci = read_list_file(ARGS.req_loci_file)
    except IOError:
        LOGGER.error("Cannot open required loci file: %s", ARGS.req_loci_file)
        raise

    try:
        if ARGS.excl_loci_file is not None:
            ARGS.exclude_loci = read_list_file(ARGS.excl_loci_file)
Example #23
def main(args):

    # preparation
    if not os.path.exists(args.exp_dir):
        os.makedirs(args.exp_dir)
    config_logging(os.path.join(args.exp_dir, "%s.log" % args.exp_name))
    log.info("Experiment %s" % (args.exp_name))
    log.info("Receive config %s" % (args.__str__()))
    log.info("Start creating tasks")
    pretrain_task = [get_task(taskname, args) for taskname in args.pretrain_task]
    finetune_tasks = [get_task(taskname, args) for taskname in args.finetune_tasks]
    log.info("Start loading data")

    if args.image_pretrain_obj != "none" or args.view_pretrain_obj != "none":
        for task in pretrain_task:
            task.load_data()
    for task in finetune_tasks:
        task.load_data()

    log.info("Start creating models")
    if len(pretrain_task):
        if args.image_pretrain_obj != "none":
            image_ssl_model = get_model("image_ssl", args)
            log.info("Loaded image ssl model")

        if args.view_pretrain_obj != "none":
            view_ssl_model = get_model("view_ssl", args)
            log.info("Loaded view ssl model")

    if args.finetune_obj != "none": 
        sup_model = get_model("sup", args)
        log.info("Loaded supervised model")

    #if args.load_ckpt != "none":
    #    load_model(model, pretrain_complete_ckpt)

    # pretrain
    if len(pretrain_task):
        if args.image_pretrain_obj != "none":
            image_ssl_model.to(args.device)
            pretrain = Trainer("pretrain", image_ssl_model, pretrain_task[0], args)
            pretrain.train()
            image_pretrain_complete_ckpt = os.path.join(
                args.exp_dir, "image_pretrain_%s_complete.pth" % pretrain_task[0].name
            )
            save_model(image_pretrain_complete_ckpt, image_ssl_model)
        else:
            if args.imagessl_load_ckpt:
                image_pretrain_complete_ckpt = args.imagessl_load_ckpt

        if args.view_pretrain_obj != "none":
            view_ssl_model.to(args.device)
            pretrain = Trainer("pretrain", view_ssl_model, pretrain_task[0], args)
            pretrain.train()
            view_pretrain_complete_ckpt = os.path.join(
                args.exp_dir, "view_pretrain_%s_complete.pth" % pretrain_task[0].name
            )
            save_model(view_pretrain_complete_ckpt, view_ssl_model)
        else:
            if args.viewssl_load_ckpt:
                view_pretrain_complete_ckpt = args.viewssl_load_ckpt

    # finetune and test
    for task in finetune_tasks:
        if args.imagessl_load_ckpt is not "none":
            # Load the pretrained image-SSL weights into the supervised model.
            pretrained_dict = torch.load(image_pretrain_complete_ckpt,
                                         map_location=torch.device('cpu'))
            model_dict = sup_model.state_dict()
            # Rename "patch" keys to "image" and keep only keys present in the model.
            pretrained_dict = {k.replace("patch", "image"): v
                               for k, v in pretrained_dict.items()
                               if k.replace("patch", "image") in model_dict}
            model_dict.update(pretrained_dict)
            sup_model.load_state_dict(model_dict)

        if "adv" in args.finetune_obj:
            # print(type(sup_model))
            sup_model["generator"].to(args.device)
            sup_model["discriminator"].to(args.device)
            finetune = GANTrainer("finetune", sup_model, task, args)
        else:
            sup_model.to(args.device)
            finetune = Trainer("finetune", sup_model, task, args)

        finetune.train()
        finetune.eval("test")
        if "adv" in args.finetune_obj:
            finetune_generator_complete_ckpt = os.path.join(
                    args.exp_dir, "finetune_%s_generator_complete.pth" % task.name
                )

            save_model(finetune_generator_complete_ckpt, sup_model["generator"])

            finetune_discriminator_complete_ckpt = os.path.join(
                    args.exp_dir, "finetune_%s_discriminator_complete.pth" % task.name
                )

            save_model(finetune_discriminator_complete_ckpt, sup_model["discriminator"])
        
        else:
            finetune_complete_ckpt = os.path.join(
                    args.exp_dir, "finetune_%s_complete.pth" % task.name
                )

            save_model(finetune_complete_ckpt, sup_model)
        

    # evaluate
    # TODO: evaluate result on test split, write prediction for leaderboard submission (for dataset
    # without test labels)
    log.info("Done")
    return