Example #1
    def __get_available_components(self):
        """
		
		"""
        components_dir = self.builder_params.get('components_dir')
        config_filepath = util_methods.locate_config(components_dir)

        # get the plugin parameters-- i.e. each component needs to have a script and entry method to call.
        plugin_parameters = cfg_parser.read_config(config_filepath,
                                                   'plugin_params')
        self.builder_params.add(plugin_parameters)
        entry_module = plugin_parameters[
            'entry_module']  #the script filename (minus the .py extension)
        entry_method = plugin_parameters['entry_method']

        logging.info(
            "Search for available components with configuration file at: %s",
            config_filepath)
        available_components = cfg_parser.read_config(config_filepath,
                                                      'plugins')

        # the paths in the dictionary above are relative to the components_dir-- prepend that directory name for the full path
        available_components = {
            k: os.path.join(components_dir, available_components[k])
            for k in available_components.keys()
        }

        # check that the plugin components have the required structure
        self.available_components = {}
        for k in available_components.keys():
            if util_methods.component_structure_valid(available_components[k],
                                                      entry_module,
                                                      entry_method):
                self.available_components[k] = available_components[k]

        logging.info('Available components: ')
        logging.info(pretty_print(self.available_components))

        # get the specifications for the standard components and the analysis components
        self.standard_components = [
            c for c in cfg_parser.read_config(config_filepath,
                                              'standard_plugins').values()[0]
            if c in self.available_components.keys()
        ]
        self.analysis_components = [
            c for c in cfg_parser.read_config(config_filepath,
                                              'analysis_plugins').values()[0]
            if c in self.available_components.keys()
        ]
        logging.info('Standard components: %s', self.standard_components)
        logging.info('Analysis components: %s', self.analysis_components)
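
The section and key names below are not taken from the project; they are inferred from the accesses in __get_available_components. As a minimal sketch, a components configuration of the following shape would satisfy the method, read here with the standard-library configparser for illustration (the project's own cfg_parser.read_config presumably returns the requested section as a dict and splits list-valued entries such as the plugin groups):

import configparser

# Hypothetical components config: [plugin_params] names the entry point,
# [plugins] maps component names to paths relative to components_dir, and
# [standard_plugins] / [analysis_plugins] group the component names.
hypothetical_cfg = """
[plugin_params]
entry_module = plugin
entry_method = run

[plugins]
align = align_component
count = count_component

[standard_plugins]
names = align,count

[analysis_plugins]
names = count
"""

parser = configparser.ConfigParser()
parser.read_string(hypothetical_cfg)
print(dict(parser['plugins']))                         # {'align': 'align_component', 'count': 'count_component'}
print(parser['standard_plugins']['names'].split(','))  # ['align', 'count']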
Example #2
def sync_surveys():
    '''Driver function to subscribe to the alerts of microlensing events produced by surveys OGLE, MOA
    and KMTNet.  The function downloads their model parameters and data wherever available. 
    '''

    # Read script configuration:
    config_file_path = '/home/robouser/.robonet_site/surveys_sync.xml'
    config = config_parser.read_config(config_file_path)
    
    log = log_utilities.start_day_log( config, __name__ )
    log.info( 'Started sync of survey data')

    # Harvest parameters of lenses detected by OGLE
    ogle_data = get_ogle_parameters(config, log)


    # Sync against database

    # Harvest MOA information
    moa_data = get_moa_parameters(config, log)

    # Sync against database

    # Harvest KMTNet information
    # KMTNet are not producing alerts yet
    #get_kmtnet_parameters(config)
    
    log_utilities.end_day_log( log )
Example #3
def sync_artemis():
    '''Driver function to maintain an up to date copy of the data on all microlensing events from 
        the ARTEMiS server at Univ. of St. Andrews.
    '''
    
    # Read configuration:
    config_file_path = '/home/robouser/.robonet_site/artemis_sync.xml'
    config = config_parser.read_config(config_file_path)
    log = log_utilities.start_day_log( config, __name__ )
    log.info('Started sync with ARTEMiS server')
    
    # Sync the results of ARTEMiS' own model fits for all events:
    sync_artemis_data_db(config,'model',log)

    # Sync the event parameters published by the surveys from the ARTEMiS server:
    sync_artemis_data_db(config,'pubpars',log)

    # Sync the event photometry data from the ARTEMiS server:
    sync_artemis_data_db(config,'data',log)

    # Sync ARTEMiS' internal fileset, to gain access to the anomaly indicators:
    rsync_internal_data(config)

    # Tidy up and finish:
    log_utilities.end_day_log( log )
Example #4
def is_installed():
    '''
    Returns True if ModelSim is installed on the computer in the expected location.
    Checks the config file to verify whether a custom path should be used.
    '''

    config_parameters = config_parser.read_config()
    global modelsim_path
    custom_path = False
    custom_path_found = False

    for x in config_parameters:
        if "MODELSIM PATH" in x[0].upper():
            modelsim_path = x[1]
            custom_path_found = True

    if custom_path_found == False:
        modelsim_path = r'C:\Program Files\modeltech64_2019.3\win64'
        print("Changing ModelSim Path to default : ", modelsim_path)
    else:
        print("ModelSim Path : ", modelsim_path)

    is_dir = os.path.isdir(modelsim_path)

    return is_dir
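
The loop in is_installed() only requires config_parser.read_config() to yield (name, value) pairs whose name contains "MODELSIM PATH" (compared case-insensitively). Purely for illustration, a hypothetical result that would activate a custom install location might look like this (names and paths are placeholders, not taken from the project):

# Hypothetical parsed config entries as consumed by is_installed().
config_parameters = [
    ("Modelsim Path", r"D:\Tools\modeltech64_2020.1\win64"),
    ("Quartus Path", r"C:\intelFPGA\18.1\quartus\bin64"),
]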
Example #5
def trigger(topic=None, details=None):
    key = config_parser.read_config().get('ifttt', 'key')
    url = "https://maker.ifttt.com/trigger/" + str(
        details) + "/with/key/" + str(key)
    ahlogger.log(url)
    r = requests.post(url)
    ahlogger.log(r.status_code)
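
Note that the details argument, not topic, is interpolated as the IFTTT Maker Webhooks event name, and topic is unused in this snippet. A hedged usage sketch with a placeholder event name:

# Posts to https://maker.ifttt.com/trigger/door_opened/with/key/<key>
trigger(details='door_opened')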
Example #6
    def __read_pipeline_config(self):

        # Read the pipeline-level config file
        config_filepath = util_methods.locate_config(
            self.builder_params.get('pipeline_home'))
        logging.info("Default pipeline configuration file is: %s",
                     config_filepath)
        return cfg_parser.read_config(config_filepath)
Example #7
    def __get_aligner_info(self):
        """
		Finds and parses the aligner configuration file-- this only indicates which aligners
		are available and which are default.  Nothing specific to a particular aligner.
		"""
        aligner_cfg = util_methods.locate_config(
            self.builder_params.get('aligners_dir'))
        self.builder_params.add(cfg_parser.read_config(aligner_cfg))
Example #8
def test_tap_reader():
    
    config = config_parser.read_config( '../configs/exofop_publish.xml' )

    tap_data = exofop_publisher.load_tap_output( config )
    
    for event_name, entry in tap_data.items():
        if 'MOA-2015-BLG-499' in event_name: 
            print ':'+event_name+':', entry
Example #9
def main():

    # create temp directory if it doesn't exist
    os.makedirs('temp', exist_ok=True)

    config_parameters = config_parser.read_config()

    quartus_path = 'C:/intelFPGA/18.1/quartus/bin64'

    for x in config_parameters:
        if "QUARTUS PATH" in x[0].upper():
            quartus_path = x[1]

    # exit if quartus is not installed in the expected location
    if not os.path.isdir(quartus_path):
        print(r'Quartus is not installed in the expected location: ',
              quartus_path)
        print(
            'If you are in the TLA, you can use the computers by the soldering irons, or the lab across the hallway'
        )
        print(
            'Additionally, if you are attempting a custom install, make sure to install quartus prime'
        )
        exit(1)

    # list vhd files to include in the quartus project
    vhd_list = gp.find_vhd_files(dir='src')
    if vhd_list == []:
        print('no vhd files were found')
        exit(1)

    # remove any existing QuartusWork directory before recreating it; if another
    # process is using the directory, we need to exit
    try:
        shutil.rmtree('internal/QuartusWork')
    except FileNotFoundError:
        pass
    except Exception as e:
        print("Could not delete QuartusWork", e)
        exit(1)
    os.makedirs('internal/QuartusWork')

    gp.write_qsf(vhd_list, dir='internal/QuartusWork')
    gp.write_qpf(dir='internal/QuartusWork')
    gp.write_sdc(dir='internal/QuartusWork')

    build_success = build.build_all()
    if not build_success:
        print(no_timings_message)
        exit(1)

    parse_success = parse_timings.parse_timings()
    if not parse_success:
        exit(1)

    # Use Popen to start notepad in a non-blocking manner
    subprocess.Popen(['Notepad', 'temp/timing.txt'])
Example #10
    def update_config(self):
        config_list = config_parser.read_config()
        self.units = config_list['units']
        self.max_limit = config_list['max_limit']
        self.sensor_name_list = [
            config_list['sensor_1_name'], config_list['sensor_2_name'],
            config_list['sensor_3_name'], config_list['sensor_4_name'],
            config_list['sensor_5_name']
        ]
        print(config_list)
Example #11
def main():
    # Prior to training, please adapt the hyperparameters in config_parser.py and run that script to
    # generate the training config file used to train your own VOCA model.

    pkg_path, _ = os.path.split(os.path.realpath(__file__))
    init_config_fname = os.path.join(pkg_path, 'training_config.cfg')
    if not os.path.exists(init_config_fname):
        print('Config not found %s' % init_config_fname)
        create_default_config(init_config_fname)

    config = configparser.ConfigParser()
    config.read(init_config_fname)

    # Path to cache the processed audio
    config.set(
        'Input Output', 'processed_audio_path',
        './training_data/processed_audio_%s.pkl' %
        config.get('Audio Parameters', 'audio_feature_type'))

    checkpoint_dir = config.get('Input Output', 'checkpoint_dir')
    if os.path.exists(checkpoint_dir):
        print('Checkpoint dir already exists %s' % checkpoint_dir)
        key = input(
            'Press "q" to quit, "x" to erase existing folder, and any other key to continue training: '
        )
        if key.lower() == 'q':
            return
        elif key.lower() == 'x':
            try:
                shutil.rmtree(checkpoint_dir, ignore_errors=True)
            except:
                print('Failed deleting checkpoint directory')

    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    config_fname = os.path.join(checkpoint_dir, 'config.pkl')
    if os.path.exists(config_fname):
        print('Use existing config %s' % config_fname)
    else:
        with open(config_fname, 'w') as fp:
            config.write(fp)
            fp.close()

    config = read_config(config_fname)
    data_handler = DataHandler(config)
    batcher = Batcher(data_handler)

    with tf.Session() as session:
        model = Model(session=session, config=config, batcher=batcher)
        model.build_graph()
        model.load()
        model.train()
Example #12
    def __check_project_config(self):
        """
		Reads a project configuration file-- this configuration file lays out how a typical project is arranged in terms of file hierarchy,
		naming of fastq files, etc.  Parameters are added to the builder_params object
		"""
        # Read the project-level config file
        if not self.builder_params.get('project_configuration_file'):
            default_filepath = util_methods.locate_config(
                self.builder_params.get('project_configurations_dir'),
                'default')
            self.builder_params.reset_param('project_configuration_file',
                                            default_filepath)

        config_filepath = self.builder_params.get('project_configuration_file')
        logging.info("Project configuration file is: %s", config_filepath)
        self.builder_params.add(cfg_parser.read_config(config_filepath))
Example #13
    def load(self, filename):
        world, agents, walls = read_config(filename)
        #load world parameters
        self.width = world['width']
        self.height = world['height']
        self.visibility_threshold = world['v_threshold']
        self.grid = Grid(width=self.width, height=self.height)

        #Load agent nodes
        for ag in agents:
            a = AgentNode(address=ag['mac'], x=ag['x'], y=ag['y'])
            a.set_station()
            self.add_node(a)
        #Load walls
        for wl in walls:
            w = WallNode(x=wl['x'], y=wl['y'])
            self.add_node(w)
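
The attribute accesses in load() imply the shape of the tuple returned by read_config(filename); a minimal sketch of that shape, with placeholder values:

# Hypothetical return value of read_config(filename) as consumed by load().
world = {'width': 20, 'height': 10, 'v_threshold': 5}
agents = [{'mac': 'aa:bb:cc:dd:ee:ff', 'x': 1, 'y': 2}]
walls = [{'x': 3, 'y': 4}]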
Example #14
def k2_events(campaign=9, year=2016):
    """Function to analyse and select events from K2 field"""
    
    # Parse script's own config
    config_file_path = '../configs/surveys_sync.xml'
    config = config_parser.read_config(config_file_path)
        
    # Read in data from surveys
    survey_events = exofop_publisher.load_survey_event_data( config )

    # Initialize K2-footprint data:
    k2_footprint = k2_footprint_class.K2Footprint( campaign, year )    
    
    # Plot events relative to the K2 footprint:
    plot_file = path.join( config['log_directory'], \
                    'K2C9_targets_in_footprint.png' )
    k2_footprint.targets_in_footprint( survey_events )
    k2_footprint.targets_in_superstamp( survey_events )
    k2_footprint.targets_in_campaign( survey_events )
    k2_footprint.plot_footprint( plot_file=plot_file, \
            targets=survey_events, year=2016 )
Example #15
    def __check_genome_valid(self):
        """
		Ensure that the desired genome is acceptable.  If not, throw an exception
		If the appropriate genome is found, read-in the genome parameters (e.g. path to GTF file, etc)
		"""
        genomes_dir = self.builder_params.get('genomes_dir')
        selected_genome = self.builder_params.get('genome')
        try:
            config_filepath = util_methods.locate_config(genomes_dir)
            self.builder_params.add(
                cfg_parser.read_config(config_filepath, selected_genome))

        except Exception as ex:
            logging.error(
                'Caught exception while looking for genome configuration file: '
            )
            logging.error(ex.message)
            logging.error('Incorrect genome: %s', selected_genome)
            logging.error('See available genomes in : %s', genomes_dir)
            raise IncorrectGenomeException(
                "Incorrect or unconfigured genome specified.  Check log and correct as necessary."
            )
Example #16
def rtmodel_subscriber(log=None, renamed=None):
    """Function to download the parameters of events modeled by RTModel"""
    
    # Read configuration:
    config_file_path = path.join(path.expanduser('~'),
                                 '.robonet_site', 'rtmodel_sync.xml')
    config = config_parser.read_config(config_file_path)
    
    if log is None:
        use_given_log = False
        log = log_utilities.start_day_log( config, __name__ )
        log.info('Started sync with RTmodel server')
    else:
        use_given_log = True
        
    # Scrape the list of events which have been modeled from the 
    # top level site:
    events_list = get_events_list( config, log )
    
    # Loop over all events, harvest the parameters of the best fit
    # for each one:
    rtmodels = {}
    for event_id in events_list:
        model = get_event_params( config, event_id, log )
        if renamed != None and model.event_name in renamed.keys():
            model.event_name = renamed[model.event_name]
            log.info('-> Switched name of event renamed by ARTEMiS to '+\
                             model.event_name)
        rtmodels[model.event_name] = model
        log.info( model.summary() )
        
    # Tidy up and finish:
    if use_given_log == False:
        log_utilities.end_day_log( log )
    
    return rtmodels    
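
Since the function returns a dict of models keyed by event name, a caller might use it along these lines (a sketch; the actual keys come from the RTModel listing at run time):

rtmodels = rtmodel_subscriber()
for event_name, model in rtmodels.items():
    print(event_name, model.summary())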
Example #17
    parser.add_argument('-st', '--split_term', default='gnrtdxb', help='split can be gnrt, clsf, or lgtd')
    parser.add_argument('-d', '--data_dir', help='path where the downloaded data is stored')
    parser.add_argument('-cp', '--checkpoint_dir', help='path where checkpoints file need to be stored')
    cols = 8

    args = parser.parse_args()

    if args.conf is None:
        args.conf = os.path.join(os.path.dirname(__file__), 'default.cfg')
        print('configuration file not specified, trying to load '
              'it from current directory', args.conf)

    if not os.path.exists(args.conf):
        print('Config not found' + args.conf)

    config = read_config(args.conf)

    print('Initializing parameters')
    template_file_path = config['template_fname']
    template_mesh = Mesh(filename=template_file_path)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    print('Generating transforms')
    M, A, D, U = mesh_operations.generate_transform_matrices(template_mesh, config['downsampling_factors'])

    D_t = [scipy_to_torch_sparse(d).to(device) for d in D]
    U_t = [scipy_to_torch_sparse(u).to(device) for u in U]
    A_t = [scipy_to_torch_sparse(a).to(device) for a in A]
    num_nodes = [len(M[i].v) for i in range(len(M))]
Example #18
def main(args):
    if not os.path.exists(args.conf):
        print('Config not found' + args.conf)

    config = read_config(args.conf)

    print('Initializing parameters')
    template_file_path = config['template_fname']
    template_mesh = Mesh(filename=template_file_path)

    if args.checkpoint_dir:
        checkpoint_dir = args.checkpoint_dir
    else:
        checkpoint_dir = config['checkpoint_dir']
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    visualize = config['visualize']
    output_dir = config['visual_output_dir']
    if visualize is True and not output_dir:
        print(
            'No visual output directory is provided. Checkpoint directory will be used to store the visual results'
        )
        output_dir = checkpoint_dir

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    eval_flag = config['eval']
    lr = config['learning_rate']
    lr_decay = config['learning_rate_decay']
    weight_decay = config['weight_decay']
    total_epochs = config['epoch']
    workers_thread = config['workers_thread']
    opt = config['optimizer']
    batch_size = config['batch_size']
    val_losses, accs, durations = [], [], []

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    print('Generating transforms')
    M, A, D, U = mesh_operations.generate_transform_matrices(
        template_mesh, config['downsampling_factors'])
    print(len(M))

    for i in range(len(M)):
        print(M[i].v.shape)
    print('************A****************')
    for a in A:
        print(a.shape)
    print('************D****************')
    for d in D:
        print(d.shape)
    print('************U****************')
    for u in U:
        print(u.shape)

    D_t = [scipy_to_torch_sparse(d).to(device) for d in D]
    U_t = [scipy_to_torch_sparse(u).to(device) for u in U]
    A_t = [scipy_to_torch_sparse(a).to(device) for a in A]
    num_nodes = [len(M[i].v) for i in range(len(M))]

    print('Loading Dataset')
    if args.data_dir:
        data_dir = args.data_dir
    else:
        data_dir = config['data_dir']

    normalize_transform = Normalize()
    dataset = ComaDataset(data_dir,
                          dtype='train',
                          split=args.split,
                          split_term=args.split_term,
                          pre_transform=normalize_transform)
    dataset_test = ComaDataset(data_dir,
                               dtype='test',
                               split=args.split,
                               split_term=args.split_term,
                               pre_transform=normalize_transform)
    train_loader = DataLoader(dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=workers_thread)
    test_loader = DataLoader(dataset_test,
                             batch_size=1,
                             shuffle=False,
                             num_workers=workers_thread)

    print('Loading model')
    start_epoch = 1
    coma = Coma(dataset, config, D_t, U_t, A_t, num_nodes)
    if opt == 'adam':
        optimizer = torch.optim.Adam(coma.parameters(),
                                     lr=lr,
                                     weight_decay=weight_decay)
    elif opt == 'sgd':
        optimizer = torch.optim.SGD(coma.parameters(),
                                    lr=lr,
                                    weight_decay=weight_decay,
                                    momentum=0.9)
    else:
        raise Exception('No optimizer provided')

    checkpoint_file = config['checkpoint_file']

    if checkpoint_file:
        checkpoint = torch.load(checkpoint_file)
        start_epoch = checkpoint['epoch_num']
        coma.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        #To find if this is fixed in pytorch
        for state in optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.to(device)
    coma.to(device)

    if eval_flag:
        val_loss = evaluate(coma, output_dir, test_loader, dataset_test,
                            template_mesh, device, visualize)
        print('val loss', val_loss)
        return

    best_val_loss = float('inf')
    val_loss_history = []
    train_loss_history = []

    for epoch in range(start_epoch, total_epochs + 1):
        print("Training for epoch ", epoch)
        train_loss = train(coma, train_loader, len(dataset), optimizer, device)
        val_loss = evaluate(coma,
                            output_dir,
                            test_loader,
                            dataset_test,
                            template_mesh,
                            device,
                            visualize=visualize)

        val_loss_history.append(val_loss)
        train_loss_history.append(train_loss)

        print('epoch ', epoch, ' Train loss ', train_loss, ' Val loss ',
              val_loss)
        if val_loss < best_val_loss:
            save_model(coma, optimizer, epoch, train_loss, val_loss,
                       checkpoint_dir)
            best_val_loss = val_loss
            val_losses.append(best_val_loss)

        if opt == 'sgd':
            adjust_learning_rate(optimizer, lr_decay)

    if torch.cuda.is_available():
        torch.cuda.synchronize()

    times = list(range(len(train_loss_history)))

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(times, train_loss_history)
    ax.plot(times, val_loss_history)
    ax.set_xlabel("iteration")
    ax.set_ylabel(" loss")
    plt.savefig(os.path.join(checkpoint_dir, 'result.png'))
Example #19
from flask import Flask, request
import json
from config_parser import read_config
from controller import recipe_store, get_recipe_details, delete_recipe_details
import os
config_path = os.getcwd()
configFilePath = config_path + "/config.ini"
config_dict = read_config(configFilePath)
app = Flask(__name__)


@app.route('/recipe', methods=['GET', 'POST', "PUT", "DELETE"])
def recipe():
    if request.method == 'GET':
        name = request.args.get("recipe_name")
        if not name:
            return json.dumps({"success": False, "message": "Please provide recipe name"})
        data = get_recipe_details(name)
        return json.dumps(data)
    if request.method in ['POST', "PUT"]:
        incoming_request = json.loads(request.data)
        return recipe_store(incoming_request)
    if request.method == 'DELETE':
        name = request.args.get("recipe_name")
        if not name:
            return json.dumps({"success": False, "message": "Please provide recipe name"})
        data = delete_recipe_details(name)
        return json.dumps(data)
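
Assuming the app is served locally on Flask's default port 5000, the endpoints above could be exercised with the requests library as sketched below; the recipe fields passed to recipe_store() are placeholders, since their exact schema lives in the controller module:

import requests

base = 'http://localhost:5000/recipe'

# POST/PUT: the JSON body is handed to recipe_store() via json.loads(request.data).
requests.post(base, json={'recipe_name': 'pancakes', 'ingredients': ['flour', 'milk']})

# GET/DELETE: the handler reads the recipe_name query parameter via request.args.
print(requests.get(base, params={'recipe_name': 'pancakes'}).json())
requests.delete(base, params={'recipe_name': 'pancakes'})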
Example #20
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 24 01:11:39 2016

@author: robouser
"""

import exofop_publisher
import event_classes
import config_parser

config = config_parser.read_config( '../configs/exofop_publish.xml' )

events = {'master_index': { } }

e = event_classes.K2C9Event()
e.ogle_name = 'OGLE-2015-BLG-2093'
e.in_footprint = True
e.during_campaign = True

events['master_index'][1] = e

e2 = event_classes.K2C9Event()
e2.moa_name = 'MOA-2016-BLG-006'
e2.survey_id = 'gb10-R-6-38787'
e2.in_footprint = True
e2.during_campaign = True

events['master_index'][2] = e2

exofop_publisher.get_finder_charts( config, events )
Example #21
    def __init__(self):
        self.config_list = config.read_config()
        self.quit = False
Example #22
    def add_deck_textbox(self):
        config_parser.read_config()
        self.deck_textbox = QLineEdit(self.widget)
        self.deck_textbox.move(750, 0)
        self.deck_textbox.resize(110, 30)
        self.deck_textbox.setText(config_parser.deck_name)
Example #23
    for filename in os.listdir(current_dir):
        if filename.endswith(".mp3"):
            new_file_name = filename.replace("  ", " ")
            new_file_name = new_file_name.replace(' ', '_')
            f_name = os.path.splitext(new_file_name)[0]
            new_name = music_dir + f_name + "_" + date + ".mp3"
            os.rename(current_dir + "/" + filename, new_name)


def extract_mp3(command):
    os.system(command)


if __name__ == "__main__":

    config = config_parser.read_config()
    youtube_key = config.get('youtube', 'api_key')
    music_dir = config.get('media', 'music_dir')

    script_file = '/usr/bin/python ' + os.path.dirname(
        os.path.realpath(__file__)) + '/mp3_extractor.py'

    all_tacks = get_sportify_playlist("test", config)
    commands = []
    for each_track in all_tacks:
        video_url = get_youtube_id(each_track, youtube_key)
        if (video_url is not None):
            command = script_file + ' ' + video_url
            commands.append(command)

    pool = Pool(8)
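
The snippet stops right after the worker pool is created; presumably the download commands are then dispatched to it, roughly as sketched below (an assumption, not the original continuation):

    # Run up to 8 extractions in parallel and wait for them to finish.
    pool.map(extract_mp3, commands)
    pool.close()
    pool.join()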
Example #24
def main(args):
    if not os.path.exists(args.conf):
        print('Config not found' + args.conf)

    config = read_config(args.conf)

    print('Initializing parameters')
    template_file_path = config['template_fname']
    template_mesh = Mesh(filename=template_file_path)

    if args.checkpoint_dir:
        checkpoint_dir = args.checkpoint_dir
    else:
        checkpoint_dir = config['checkpoint_dir']
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    visualize = config['visualize']
    output_dir = config['visual_output_dir']
    if visualize is True and not output_dir:
        print(
            'No visual output directory is provided. Checkpoint directory will be used to store the visual results'
        )
        output_dir = checkpoint_dir

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    eval_flag = config['eval']
    lr = config['learning_rate']
    lr_decay = config['learning_rate_decay']
    weight_decay = config['weight_decay']
    total_epochs = config['epoch']
    workers_thread = config['workers_thread']
    opt = config['optimizer']
    batch_size = config['batch_size']
    val_losses, accs, durations = [], [], []

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    print('Generating transforms')
    M, A, D, U = mesh_operations.generate_transform_matrices(
        template_mesh, config['downsampling_factors'])

    D_t = [scipy_to_torch_sparse(d).to(device) for d in D]
    U_t = [scipy_to_torch_sparse(u).to(device) for u in U]
    A_t = [scipy_to_torch_sparse(a).to(device) for a in A]
    num_nodes = [len(M[i].v) for i in range(len(M))]

    print('Loading Dataset')
    if args.data_dir:
        data_dir = args.data_dir
    else:
        data_dir = config['data_dir']

    normalize_transform = Normalize()

    dataset = ComaDataset(data_dir,
                          dtype='train',
                          split=args.split,
                          split_term=args.split_term,
                          pre_transform=normalize_transform)
    print('Loading model')
    start_epoch = 1
    coma = Coma(dataset, config, D_t, U_t, A_t, num_nodes)
    if opt == 'adam':
        optimizer = torch.optim.Adam(coma.parameters(),
                                     lr=lr,
                                     weight_decay=weight_decay)
    elif opt == 'sgd':
        optimizer = torch.optim.SGD(coma.parameters(),
                                    lr=lr,
                                    weight_decay=weight_decay,
                                    momentum=0.9)
    else:
        raise Exception('No optimizer provided')

    checkpoint_file = config['checkpoint_file']
    print(checkpoint_file)
    if checkpoint_file:
        checkpoint = torch.load(checkpoint_file)
        start_epoch = checkpoint['epoch_num']
        coma.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        #To find if this is fixed in pytorch
        for state in optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.to(device)
    coma.to(device)
    print('making...')
    norm = torch.load('../processed_data/processed/sliced_norm.pt')
    normalize_transform.mean = norm['mean']
    normalize_transform.std = norm['std']

    #'0512','0901','0516','0509','0507','9305','0503','4919','4902',
    files = [
        '0514', '0503', '0507', '0509', '0512', '0501', '0901', '1001', '4902',
        '4913', '4919', '9302', '9305', '12411'
    ]

    coma.eval()

    meshviewer = MeshViewers(shape=(1, 2))
    for file in files:
        #mat = np.load('../Dress Dataset/'+file+'/'+file+'_pose.npz')
        mesh_dir = os.listdir('../processed_data/' + file + '/mesh/')
        latent = []
        print(len(mesh_dir))
        for i in tqdm(range(len(mesh_dir))):
            data_file = '../processed_data/' + file + '/mesh/' + str(
                i) + '.obj'
            mesh = Mesh(filename=data_file)
            adjacency = get_vert_connectivity(mesh.v, mesh.f).tocoo()
            edge_index = torch.Tensor(np.vstack(
                (adjacency.row, adjacency.col))).long()
            mesh_verts = (torch.Tensor(mesh.v) -
                          normalize_transform.mean) / normalize_transform.std
            data = Data(x=mesh_verts, y=mesh_verts, edge_index=edge_index)
            data = data.to(device)
            with torch.no_grad():
                out, feature = coma(data)
                latent.append(feature.cpu().detach().numpy())
            # print(feature.shape)
            if i % 50 == 0:
                expected_out = data.x
                out = out.cpu().detach(
                ) * normalize_transform.std + normalize_transform.mean
                expected_out = expected_out.cpu().detach(
                ) * normalize_transform.std + normalize_transform.mean
                out = out.numpy()
                save_obj(out, template_mesh.f + 1,
                         './vis/reconstruct_' + str(i) + '.obj')
                save_obj(expected_out, template_mesh.f + 1,
                         './vis/ori_' + str(i) + '.obj')

        np.save('./processed/0820/' + file, latent)

    if torch.cuda.is_available():
        torch.cuda.synchronize()
Example #25
import datetime as dt
import subprocess

import generate_project as gp
import config_parser

r'''
:temporary script for testing
C:\intelFPGA\18.1\quartus\bin64\quartus_map --read_settings_files=on --write_settings_files=off qs2 -c qs2

C:\intelFPGA\18.1\quartus\bin64\quartus_fit --read_settings_files=off --write_settings_files=off qs2 -c qs2
C:\intelFPGA\18.1\quartus\bin64\quartus_asm --read_settings_files=off --write_settings_files=off qs2 -c qs2
C:\intelFPGA\18.1\quartus\bin64\quartus_sta --sdc="qs2.sdc" qs2 --do_report_timing
'''

quartus_bin_dir = r'C:\intelFPGA\18.1\quartus\bin64'

# Check the config file for a custom path; if none is found, the default path above is used
config_parameters = config_parser.read_config()

for x in config_parameters:
    if "QUARTUS PATH" in x[0].upper():
        quartus_bin_dir = x[1]


def build_all(dir='internal/QuartusWork'):
    pname = gp.project_name

    starttime = dt.datetime.now()

    print(f'\nStarting compilation at {str(starttime)}\n')

    # starting mapping
    exit_code = subprocess.call(
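
The snippet is cut off inside the first subprocess.call; judging from the quartus_map command shown in the docstring above, the mapping step presumably continues along these lines (a sketch, not the original continuation; os is assumed to be imported elsewhere in the module):

    # Run quartus_map from the configured bin directory inside the work directory.
    exit_code = subprocess.call([
        os.path.join(quartus_bin_dir, 'quartus_map'),
        '--read_settings_files=on', '--write_settings_files=off',
        pname, '-c', pname,
    ], cwd=dir)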
Example #26
def main(args):
    if not os.path.exists(args.conf):
        print('Config not found' + args.conf)

    config = read_config(args.conf)

    print('Initializing parameters')
    template_file_path = config['template_fname']
    template_mesh = Mesh(filename=template_file_path)
    print(template_file_path)

    if args.checkpoint_dir:
        checkpoint_dir = args.checkpoint_dir
        print(os.path.exists(checkpoint_dir))

    else:
        checkpoint_dir = config['checkpoint_dir']
        print(os.path.exists(checkpoint_dir))
    # if not os.path.exists(checkpoint_dir):
    #     os.makedirs(checkpoint_dir)

    visualize = config['visualize']
    output_dir = config['visual_output_dir']
    if visualize is True and not output_dir:
        print(
            'No visual output directory is provided. Checkpoint directory will be used to store the visual results'
        )
        output_dir = checkpoint_dir

    # if not os.path.exists(output_dir):
    #     os.makedirs(output_dir)

    eval_flag = config['eval']
    lr = config['learning_rate']
    lr_decay = config['learning_rate_decay']
    weight_decay = config['weight_decay']
    total_epochs = config['epoch']
    workers_thread = config['workers_thread']
    opt = config['optimizer']
    batch_size = config['batch_size']
    val_losses, accs, durations = [], [], []

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if torch.cuda.is_available():
        print('\ncuda is available...\n')
    else:
        print('\ncuda is NOT available...\n')
    # device = 'cpu'

    # print('Generating transforms')
    # M, A, D, U = mesh_operations.generate_transform_matrices(template_mesh, config['downsampling_factors'])

    # D_t = [scipy_to_torch_sparse(d).to(device) for d in D]
    # U_t = [scipy_to_torch_sparse(u).to(device) for u in U]
    # A_t = [scipy_to_torch_sparse(a).to(device) for a in A]
    # num_nodes = [len(M[i].v) for i in range(len(M))]

    print('\n*** Loading Dataset ***\n')
    if args.data_dir:
        data_dir = args.data_dir
    else:
        data_dir = config['data_dir']

    print(data_dir)
    normalize_transform = Normalize()
    # normalize_transform = MinMaxScaler()
    dataset = ComaDataset(data_dir,
                          dtype='train',
                          split=args.split,
                          split_term=args.split_term)
    dataset_test = ComaDataset(data_dir,
                               dtype='test',
                               split=args.split,
                               split_term=args.split_term,
                               pre_transform=normalize_transform)

    # dataset = FcadDataset(data_dir, dtype='train', transform=T.NormalizeScale())

    print('Done ......... \n')

    train_loader = DataLoader(dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=workers_thread)
    test_loader = DataLoader(dataset_test,
                             batch_size=1,
                             shuffle=False,
                             num_workers=workers_thread)
Example #27
def daily(topic=None, details=None):
    config = config_parser.read_config()
    alerts = list(config.items('alerts'))
    now = datetime.datetime.now()
    messages = []

    for x in alerts:
        date = str(x[1])
        event_month = int(date.split("/")[1])
        event_day = int(date.split("/")[0])
        each_message = ""
        message_time = ""

        if (int(now.month) == event_month):

            if (int(now.day) == event_day):
                message_time += "Today"

            if ((int(now.day) + 1) == event_day):
                message_time += "Tomorrow"

        if (message_time != ""):
            message_event = ""
            key = str(x[0])
            message_event += "Its " + key.replace("_",
                                                  " ") + " " + message_time
            messages.append(message_event)

    if len(messages) > 0:

        for each_message in messages:
            body = json.dumps({
                "notification": each_message,
                "accessCode": config.get('notify_me', 'api_key')
            })
            requests.post(url="https://api.notifymyecho.com/v1/NotifyMe",
                          data=body)
            headers = {
                "Access-Token": "o.Qiiw4SPAmxx3GbBxZDRO5XpYFcdgtBTq",
                "Content-Type": "application/json"
            }
            body = json.dumps({
                "body": each_message,
                "title": 'Travis Reminder'
            })
            requests.post(url="https://api.pushbullet.com/v2/pushes",
                          data=body,
                          headers=headers)

            ACCESS_TOKEN = config.get('push_bullet', 'access_token')
            headers = {
                'Authorization': 'Bearer ' + ACCESS_TOKEN,
                'Content-Type': 'application/json'
            }
            body = json.dumps({
                'body': each_message,
                'title': 'Travis Reminder',
                'type': 'note'
            })
            response = requests.post(
                url="https://api.pushbullet.com/v2/pushes",
                data=body,
                headers=headers)
            ahlogger.log(response)
            ahlogger.log(each_message)
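
The parsing above expects each entry in the 'alerts' section to map a reminder name to a day/month string (day first, since date.split("/")[0] is taken as the day). A hypothetical section, shown with the standard configparser purely for illustration:

import configparser

hypothetical = """
[alerts]
moms_birthday = 24/02
project_deadline = 01/03
"""

cfg = configparser.ConfigParser()
cfg.read_string(hypothetical)
print(list(cfg.items('alerts')))   # [('moms_birthday', '24/02'), ('project_deadline', '01/03')]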
Example #28
def main(args):
    if not os.path.exists(args.conf):
        print('Config not found' + args.conf)

    config = read_config(args.conf)
    print(colored(str(config), 'cyan'))

    eval_flag = config['eval']

    if not eval_flag:  #train mode : fresh or reload
        current_log_dir = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        current_log_dir = os.path.join('../Experiments/', current_log_dir)
    else:  #eval mode : save result plys
        if args.load_checkpoint_dir:
            current_log_dir = '../Eval'
        else:
            print(
                colored(
                    '*****please provide checkpoint file path to reload!*****',
                    'red'))
            return

    print(colored('logs will be saved in:{}'.format(current_log_dir),
                  'yellow'))

    if args.load_checkpoint_dir:
        load_checkpoint_dir = os.path.join('../Experiments/',
                                           args.load_checkpoint_dir,
                                           'chkpt')  #load last checkpoint
        print(
            colored('load_checkpoint_dir: {}'.format(load_checkpoint_dir),
                    'red'))

    save_checkpoint_dir = os.path.join(current_log_dir, 'chkpt')
    print(
        colored('save_checkpoint_dir: {}\n'.format(save_checkpoint_dir),
                'yellow'))
    if not os.path.exists(save_checkpoint_dir):
        os.makedirs(save_checkpoint_dir)

    print('Initializing parameters')
    template_file_path = config['template_fname']
    template_mesh = Mesh(filename=template_file_path)
    print(template_file_path)

    visualize = config['visualize']
    lr = config['learning_rate']
    lr_decay = config['learning_rate_decay']
    weight_decay = config['weight_decay']
    total_epochs = config['epoch']
    workers_thread = config['workers_thread']
    opt = config['optimizer']
    batch_size = config['batch_size']
    val_losses, accs, durations = [], [], []

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if torch.cuda.is_available():
        print(colored('\n...cuda is available...\n', 'green'))
    else:
        print(colored('\n...cuda is NOT available...\n', 'red'))

    ds_factors = config['downsampling_factors']
    print('Generating transforms')
    M, A, D, U = mesh_operations.generate_transform_matrices(
        template_mesh, ds_factors)

    D_t = [scipy_to_torch_sparse(d).to(device) for d in D]
    U_t = [scipy_to_torch_sparse(u).to(device) for u in U]
    A_t = [scipy_to_torch_sparse(a).to(device) for a in A]
    num_nodes = [len(M[i].v) for i in range(len(M))]
    print(colored('number of nodes in encoder : {}'.format(num_nodes), 'blue'))

    if args.data_dir:
        data_dir = args.data_dir
    else:
        data_dir = config['data_dir']

    print('*** data loaded from {} ***'.format(data_dir))

    dataset = ComaDataset(data_dir,
                          dtype='train',
                          split=args.split,
                          split_term=args.split_term)
    dataset_test = ComaDataset(data_dir,
                               dtype='test',
                               split=args.split,
                               split_term=args.split_term)
    train_loader = DataLoader(dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=workers_thread)
    test_loader = DataLoader(dataset_test,
                             batch_size=1,
                             shuffle=False,
                             num_workers=workers_thread)

    print("x :\n{} for dataset[0] element".format(dataset[0]))
    print(colored(train_loader, 'red'))
    print('Loading Model : \n')
    start_epoch = 1
    coma = ComaVAE(dataset, config, D_t, U_t, A_t, num_nodes)

    tbSummWriter = SummaryWriter(current_log_dir)

    print_model_summary = False
    if print_model_summary:
        print(coma)

    mrkdwn = str('<pre><code>' + str(coma) + '</code></pre>')
    tbSummWriter.add_text('tag2', mrkdwn, global_step=None, walltime=None)

    #write network architecture into text file
    logfile = os.path.join(current_log_dir, 'coma.txt')
    my_data_file = open(logfile, 'w')
    my_data_file.write(str(coma))
    my_data_file.close()

    if opt == 'adam':
        optimizer = torch.optim.Adam(coma.parameters(),
                                     lr=lr,
                                     weight_decay=weight_decay)
    elif opt == 'sgd':
        optimizer = torch.optim.SGD(coma.parameters(),
                                    lr=lr,
                                    weight_decay=weight_decay,
                                    momentum=0.9)
    else:
        raise Exception('No optimizer provided')

    if args.load_checkpoint_dir:
        #to load the newest saved checkpoint
        to_back = os.getcwd()
        os.chdir(load_checkpoint_dir)
        chkpt_list = sorted(os.listdir(os.getcwd()), key=os.path.getctime)
        os.chdir(to_back)
        checkpoint_file = chkpt_list[-1]

        logfile = os.path.join(current_log_dir, 'loadedfrom.txt')
        my_data_file = open(logfile, 'w')
        my_data_file.write(str(load_checkpoint_dir))
        my_data_file.close()

        print(
            colored(
                '\n\nloading Newest checkpoint : {}\n'.format(checkpoint_file),
                'red'))
        if checkpoint_file:
            checkpoint = torch.load(
                os.path.join(load_checkpoint_dir, checkpoint_file))
            start_epoch = checkpoint['epoch_num']
            coma.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            #To find if this is fixed in pytorch
            for state in optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.to(device)
    coma.to(device)

    for i, dt in enumerate(train_loader):
        dt = dt.to(device)
        graphstr = pms.summary(coma,
                               dt,
                               batch_size=-1,
                               show_input=True,
                               show_hierarchical=False)
        if print_model_summary:
            print(graphstr)

        print(colored('dt in enumerate(train_loader):{} '.format(dt), 'green'))
        #write network architecture into text file
        logfile = os.path.join(current_log_dir, 'pms.txt')
        my_data_file = open(logfile, 'w')
        my_data_file.write(graphstr)
        my_data_file.close()

        mrkdwn = str('<pre><code>' + graphstr + '</code></pre>')
        tbSummWriter.add_text('tag', mrkdwn, global_step=None, walltime=None)
        break  #for one sample only

    if eval_flag and args.load_checkpoint_dir:
        evaluatedFrom = 'predictedPlys_' + checkpoint_file
        output_dir = os.path.join('../Experiments/', args.load_checkpoint_dir,
                                  evaluatedFrom)  #load last checkpoint
        val_loss = evaluate(coma,
                            test_loader,
                            dataset_test,
                            template_mesh,
                            device,
                            visualize=True,
                            output_dir=output_dir)
        print('val loss', val_loss)
        return

    best_val_loss = float('inf')
    val_loss_history = []

    for epoch in range(start_epoch, total_epochs + 1):
        print("Training for epoch ", epoch)
        print('dataset.len : {}'.format(len(dataset)))

        train_loss = train(coma, train_loader, len(dataset), optimizer, device)
        val_loss = evaluate(coma,
                            test_loader,
                            dataset_test,
                            template_mesh,
                            device,
                            visualize=False,
                            output_dir='')  #train without visualization
        sample_latent_space(coma, epoch, device, template_mesh,
                            current_log_dir)

        tbSummWriter.add_scalar('Loss/train', train_loss, epoch)
        tbSummWriter.add_scalar('Val Loss/train', val_loss, epoch)
        tbSummWriter.add_scalar('learning_rate', lr, epoch)

        print('epoch ', epoch, ' Train loss ', train_loss, ' Val loss ',
              val_loss)
        if val_loss < best_val_loss:
            save_model(coma, optimizer, epoch, train_loss, val_loss,
                       save_checkpoint_dir)
            best_val_loss = val_loss

        val_loss_history.append(val_loss)
        val_losses.append(best_val_loss)

        if opt == 'sgd':
            adjust_learning_rate(optimizer, lr_decay)

    if torch.cuda.is_available():
        torch.cuda.synchronize()

    tbSummWriter.flush()
    tbSummWriter.close()
Example #29
def main(args):
    if not os.path.exists(args.conf):
        print('Config not found' + args.conf)

    config = read_config(args.conf)
    for k in config.keys() :
        print(k, config[k])

    print('Initializing parameters')
    template_file_path = config['template_fname']
    template_mesh = Mesh(filename=template_file_path)

    if args.checkpoint_dir:
        checkpoint_dir = args.checkpoint_dir
    else:
        checkpoint_dir = config['checkpoint_dir']
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    visualize = config['visualize']
    output_dir = config['visual_output_dir']
    if visualize is True and not output_dir:
        print('No visual output directory is provided. Checkpoint directory will be used to store the visual results')
        output_dir = checkpoint_dir

    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    eval_flag = config['eval']
    lr = config['learning_rate']
    lr_decay = config['learning_rate_decay']
    weight_decay = config['weight_decay']
    total_epochs = config['epoch']
    workers_thread = config['workers_thread']
    opt = config['optimizer']
    batch_size = config['batch_size']
    val_losses, accs, durations = [], [], []

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    print('Generating transforms')
    M, A, D, U = mesh_operations.generate_transform_matrices(template_mesh, config['downsampling_factors'])

    D_t = [scipy_to_torch_sparse(d).to(device) for d in D]
    U_t = [scipy_to_torch_sparse(u).to(device) for u in U]
    A_t = [scipy_to_torch_sparse(a).to(device) for a in A]
    num_nodes = [len(M[i].v) for i in range(len(M))]

    print('Loading Dataset')
    if args.data_dir:
        data_dir = args.data_dir
    else:
        data_dir = config['data_dir']

    normalize_transform = Normalize()
    dataset = ComaDataset(data_dir, dtype='train', split=args.split, split_term=args.split_term, pre_transform=normalize_transform)
    dataset_val = ComaDataset(data_dir, dtype='val', split=args.split, split_term=args.split_term, pre_transform=normalize_transform)
    dataset_test = ComaDataset(data_dir, dtype='test', split=args.split, split_term=args.split_term, pre_transform=normalize_transform)
    train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=workers_thread)
    val_loader = DataLoader(dataset_val, batch_size=1, shuffle=True, num_workers=workers_thread)
    test_loader = DataLoader(dataset_test, batch_size=1, shuffle=False, num_workers=workers_thread)

    print('Loading model')
    start_epoch = 1
    coma = Coma(dataset, config, D_t, U_t, A_t, num_nodes)
    if opt == 'adam':
        optimizer = torch.optim.Adam(coma.parameters(), lr=lr, weight_decay=weight_decay)
    elif opt == 'sgd':
        optimizer = torch.optim.SGD(coma.parameters(), lr=lr, weight_decay=weight_decay, momentum=0.9)
    else:
        raise Exception('No optimizer provided')

    checkpoint_file = config['checkpoint_file']
    print(checkpoint_file)
    if checkpoint_file:
        checkpoint = torch.load(checkpoint_file)
        start_epoch = checkpoint['epoch_num']
        coma.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        #To find if this is fixed in pytorch
        for state in optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.to(device)
    coma.to(device)

    if eval_flag:
        val_loss = evaluate(coma, output_dir, test_loader, dataset_test, template_mesh, device, visualize)
        print('val loss', val_loss)
        return

    best_val_loss = float('inf')
    val_loss_history = []

    from datetime import datetime
    current_time = datetime.now().strftime('%b%d_%H-%M-%S')
    log_dir = os.path.join('runs/ae', current_time)
    writer = SummaryWriter(log_dir+'-ds2_lr0.04')

    for epoch in range(start_epoch, total_epochs + 1):
        print("Training for epoch ", epoch)
        train_loss = train(coma, train_loader, len(dataset), optimizer, device)
        val_loss = evaluate(coma, output_dir, val_loader, dataset_val, template_mesh, device, epoch, visualize=visualize)

        writer.add_scalar('data/train_loss', train_loss, epoch)
        writer.add_scalar('data/val_loss', val_loss, epoch)

        print('epoch ', epoch,' Train loss ', train_loss, ' Val loss ', val_loss)
        if val_loss < best_val_loss:
            save_model(coma, optimizer, epoch, train_loss, val_loss, checkpoint_dir)
            best_val_loss = val_loss

        if epoch == total_epochs or epoch % 100 == 0:
            save_model(coma, optimizer, epoch, train_loss, val_loss, checkpoint_dir)

        val_loss_history.append(val_loss)
        val_losses.append(best_val_loss)

        if opt=='sgd':
            adjust_learning_rate(optimizer, lr_decay)

    if torch.cuda.is_available():
        torch.cuda.synchronize()

    writer.close()
Example #30
def main(args):
    if not os.path.exists(args.conf):
        print('Config not found' + args.conf)

    config = read_config(args.conf)

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.set_num_threads(args.num_threads)
    if args.rep_cudnn:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    print('Initializing parameters')
    template_file_path = config['template_fname']
    template_mesh = Mesh(filename=template_file_path)

    if args.checkpoint_dir:
        checkpoint_dir = args.checkpoint_dir
    else:
        checkpoint_dir = config['checkpoint_dir']
    checkpoint_dir = os.path.join(checkpoint_dir, args.modelname)
    print(datetime.datetime.now())
    print('checkpoint_dir', checkpoint_dir)
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    if args.data_dir:
        data_dir = args.data_dir
    else:
        data_dir = config['data_dir']

    visualize = config[
        'visualize'] if args.visualize is None else args.visualize
    output_dir = config['visual_output_dir']
    if output_dir:
        output_dir = os.path.join(output_dir, args.modelname)
    if visualize is True and not output_dir:
        print('No visual output directory is provided. \
        Checkpoint directory will be used to store the visual results')
        output_dir = checkpoint_dir

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if not args.train:
        eval_flag = True
    else:
        eval_flag = config['eval']

    if args.learning_rate:
        config['learning_rate'] = args.learning_rate
    lr = config['learning_rate']
    lr_decay = config['learning_rate_decay']
    weight_decay = config['weight_decay']
    total_epochs = config['epoch']
    workers_thread = config[
        'workers_thread'] if args.num_workers is None else args.num_workers
    opt = config['optimizer']
    batch_size = config['batch_size'] if args.batch is None else args.batch
    val_losses, accs, durations = [], [], []

    if args.device_idx is None:
        device = torch.device(
            "cuda:" +
            str(config['device_idx']) if torch.cuda.is_available() else "cpu")
    elif args.device_idx >= 0:
        device = torch.device(
            "cuda:" +
            str(args.device_idx) if torch.cuda.is_available() else "cpu")
    else:
        device = torch.device("cpu")

    print(config)

    ds_fname = os.path.join(
        './template/',
        data_dir.split('/')[-1] + '_' + args.hier_matrices + '.pkl')
    if not os.path.exists(ds_fname):
        print("Generating Transform Matrices ..")
        M, A, D, U = mesh_operations.generate_transform_matrices(
            template_mesh, config['downsampling_factors'])
        with open(ds_fname, 'wb') as fp:
            M_verts_faces = [(M[i].v, M[i].f) for i in range(len(M))]
            pickle.dump(
                {
                    'M_verts_faces': M_verts_faces,
                    'A': A,
                    'D': D,
                    'U': U
                }, fp)
    else:
        print("Loading Transform Matrices ..")
        with open(ds_fname, 'rb') as fp:
            downsampling_matrices = pickle.load(fp)

        M_verts_faces = downsampling_matrices['M_verts_faces']
        M = [
            Mesh(v=M_verts_faces[i][0], f=M_verts_faces[i][1])
            for i in range(len(M_verts_faces))
        ]
        A = downsampling_matrices['A']
        D = downsampling_matrices['D']
        U = downsampling_matrices['U']

    D_t = [scipy_to_torch_sparse(d).to(device) for d in D]
    U_t = [scipy_to_torch_sparse(u).to(device) for u in U]
    A_t = [scipy_to_torch_sparse(a).to(device) for a in A]
    num_nodes = [len(M[i].v) for i in range(len(M))]

    nV_ref = []
    ref_mean = np.mean(M[0].v, axis=0)
    ref_std = np.std(M[0].v, axis=0)
    for i in range(len(M)):
        nv = 0.1 * (M[i].v - ref_mean) / ref_std
        nV_ref.append(nv)

    tV_ref = [torch.from_numpy(s).float().to(device) for s in nV_ref]

    print('Loading Dataset')

    normalize_transform = Normalize()
    dataset = ComaDataset(data_dir,
                          dtype='train',
                          split=args.split,
                          split_term=args.split_term,
                          pre_transform=normalize_transform)
    dataset_val = ComaDataset(data_dir,
                              dtype='val',
                              split=args.split,
                              split_term=args.split_term,
                              pre_transform=normalize_transform)
    dataset_test = ComaDataset(data_dir,
                               dtype='test',
                               split=args.split,
                               split_term=args.split_term,
                               pre_transform=normalize_transform)

    train_loader = DataLoader(dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=workers_thread)
    val_loader = DataLoader(dataset_val,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=workers_thread)
    test_loader = DataLoader(dataset_test,
                             batch_size=1 if visualize else batch_size,
                             shuffle=False,
                             num_workers=workers_thread)

    print('Loading model')
    start_epoch = 1
    if args.modelname in {'ComaAtt'}:
        gcn_model = eval(args.modelname)(dataset, config, D_t, U_t, A_t,
                                         num_nodes, tV_ref)
        gcn_params = gcn_model.parameters()
    else:
        gcn_model = eval(args.modelname)(dataset, config, D_t, U_t, A_t,
                                         num_nodes)
        gcn_params = gcn_model.parameters()

    params = sum(p.numel() for p in gcn_model.parameters() if p.requires_grad)
    print("Total number of parameters is: {}".format(params))
    print(gcn_model)

    if opt == 'adam':
        optimizer = torch.optim.Adam(gcn_params,
                                     lr=lr,
                                     weight_decay=weight_decay)
    elif opt == 'sgd':
        optimizer = torch.optim.SGD(gcn_params,
                                    lr=lr,
                                    weight_decay=weight_decay,
                                    momentum=0.9)
    else:
        raise Exception('No optimizer provided')

    if args.checkpoint_file:
        checkpoint_file = os.path.join(checkpoint_dir,
                                       str(args.checkpoint_file) + '.pt')
    else:
        checkpoint_file = config['checkpoint_file']
    if eval_flag and not checkpoint_file:
        checkpoint_file = os.path.join(checkpoint_dir, 'checkpoint.pt')

    print(checkpoint_file)
    if checkpoint_file:
        print('Loading checkpoint file: {}.'.format(checkpoint_file))
        checkpoint = torch.load(checkpoint_file, map_location=device)
        start_epoch = checkpoint['epoch_num']
        gcn_model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        for state in optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.to(device)

    gcn_model.to(device)

    if eval_flag:
        val_loss, euclidean_loss = evaluate(gcn_model, output_dir, test_loader,
                                            dataset_test, template_mesh,
                                            device, visualize)
        print('val loss', val_loss)
        print('euclidean error is {} mm'.format(1000 * euclidean_loss))
        return

    best_val_loss = float('inf')
    val_loss_history = []

    for epoch in range(start_epoch, total_epochs + 1):
        print("Training for epoch ", epoch)
        train_loss = train(gcn_model, train_loader, len(dataset), optimizer,
                           device)
        val_loss, _ = evaluate(gcn_model,
                               output_dir,
                               val_loader,
                               dataset_val,
                               template_mesh,
                               device,
                               visualize=visualize)

        print('epoch {}, Train loss {:.8f}, Val loss {:.8f}'.format(
            epoch, train_loss, val_loss))
        if val_loss < best_val_loss:
            save_model(gcn_model, optimizer, epoch, train_loss, val_loss,
                       checkpoint_dir)
            best_val_loss = val_loss

        val_loss_history.append(val_loss)
        val_losses.append(best_val_loss)

        if opt == 'sgd':
            adjust_learning_rate(optimizer, lr_decay)

        if epoch in args.epochs_eval or (val_loss <= best_val_loss and
                                         epoch > int(total_epochs * 3 / 4)):
            val_loss, euclidean_loss = evaluate(gcn_model, output_dir,
                                                test_loader, dataset_test,
                                                template_mesh, device,
                                                visualize)
            print('epoch {} with val loss {}'.format(epoch, val_loss))
            print('euclidean error is {} mm'.format(1000 * euclidean_loss))

    if torch.cuda.is_available():
        torch.cuda.synchronize()