Example #1
    def test_accept_quality_factor(self):
        request = urllib2.Request(self.url)
        request.add_header("Accept", "application/xml; q=0.8, application/json; q=0.2")
        response = urllib2.urlopen(request)
        self.assertEquals(response.headers["Content-Type"], "application/xml")
        xmlData = parseXml(response)
        self.assertValidXml(xmlData)

        request.add_header("Accept", "application/xml; q=0.2, application/json; q=0.8")
        response = urllib2.urlopen(request)
        self.assertEquals(response.headers["Content-Type"], "application/json")
        jsonData = loadjson(response)
        self.assertValidJson(jsonData)

        request.add_header("Accept", "application/xml, application/json; q=0.8")
        response = urllib2.urlopen(request)
        self.assertEquals(response.headers["Content-Type"], "application/xml")
        xmlData = parseXml(response)
        self.assertValidXml(xmlData)

        request.add_header("Accept", "application/fakemimetype, application/json; q=0.8")
        response = urllib2.urlopen(request)
        self.assertEquals(response.headers["Content-Type"], "application/json")
        jsonData = loadjson(response)
        self.assertValidJson(jsonData)
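A note on the loadjson name used throughout these examples: it is an alias for the standard json module, shown explicitly in the ModelGrabber example below (from json import load as loadjson). Snippets that parse a string rather than a file-like object presumably alias json.loads instead. A minimal sketch of the two assumed imports:

# One or the other, depending on the project:
from json import load as loadjson      # parses file-like objects
# from json import loads as loadjson   # parses strings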
Example #2
 def ReadSettings(self):
     if exists('settings.json'):
         try:
             with open('settings.json', 'r', encoding='utf8') as f:
                 self.__data = loadjson(f)
         except:
             self.__data = None
     elif exists('../settings.json'):
         try:
             with open('../settings.json', 'r', encoding='utf8') as f:
                 self.__data = loadjson(f)
         except:
             self.__data = None
     else:
         self.__data = None
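The two branches above differ only in the path they try, and the bare except clauses swallow everything, including KeyboardInterrupt. A more compact sketch, assuming the same loadjson alias (json.load) and narrowing the except to the errors open() and the JSON parser can raise:

from os.path import exists
from json import load as loadjson

class Settings(object):  # hypothetical host class for the method
    def ReadSettings(self):
        self.__data = None
        # Try the local settings file first, then the parent directory.
        for candidate in ('settings.json', '../settings.json'):
            if exists(candidate):
                try:
                    with open(candidate, 'r', encoding='utf8') as f:
                        self.__data = loadjson(f)
                except (IOError, ValueError):
                    self.__data = None
                break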
Example #3
 def send_scrapy_query(self, action, arguments=None):
     url = "%s%s.json" % (self.scrapyd, action)
     method = "POST"
     headers = None
     if action.startswith('list'):
         method = "GET"
         if arguments:
             args = [
                 str(k) + '=' + str(v) for (k, v) in arguments.iteritems()
             ]
             url += '?' + '&'.join(args)
             arguments = None
     elif arguments:
         arguments = urlencode(arguments)
         headers = {'Content-Type': 'application/x-www-form-urlencoded'}
     try:
         res = yield getPage(url, method=method, postdata=arguments, \
           headers=headers, timeout=30)
         result = loadjson(res)
         returnD(result)
     except ConnectionRefusedError:
         returnD(format_error("Could not contact scrapyd server, " + \
           "maybe it's not started..."))
     except Exception as e:
         returnD(format_error(e))
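This is Twisted inlineCallbacks style: res = yield getPage(...) suspends the generator until the HTTP response arrives, and returnD presumably aliases twisted.internet.defer.returnValue (format_error is a project helper). A sketch of the assumed plumbing, since the original module header is not shown:

from twisted.internet.defer import inlineCallbacks, returnValue as returnD
from twisted.internet.error import ConnectionRefusedError
from twisted.web.client import getPage  # available in older Twisted releases

# The method above would then carry the @inlineCallbacks decorator.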
Example #4
 def test_json(self):
     request = urllib2.Request(self.url)
     request.add_header("Accept", "application/json")
     response = urllib2.urlopen(request)
     self.assertEquals(response.headers["Content-Type"], "application/json")
     jsonData = loadjson(response)
     self.assertValidJson(jsonData)
Example #5
def __load_config(path="config.json", skip_auto_generation=False):
    """
    加载配置
    """
    global __config, config_modified_time
    try:
        with open(path) as configfile:
            __config = loadjson(configfile)
            __config["config_modified_time"] = stat(path).st_mtime
    except IOError:
        if skip_auto_generation:
            __config["config_modified_time"] = time()
            return
        error(' Config file `%s` does not exist!' % path)
        with open(path, 'w') as configfile:
            configure = {
                "$schema": "https://ddns.newfuture.cc/schema/v2.8.json",
                "id": "YOUR ID or EMAIL for DNS Provider",
                "token": "YOUR TOKEN or KEY for DNS Provider",
                "dns": "dnspod",
                "ipv4": ["newfuture.cc", "ddns.newfuture.cc"],
                "ipv6": ["newfuture.cc", "ipv6.ddns.newfuture.cc"],
                "index4": "default",
                "index6": "default",
                "ttl": None,
                "proxy": None,
                "debug": False,
            }
            dumpjson(configure, configfile, indent=2, sort_keys=True)
            sys.stdout.write(
                "New template configure file `%s` is generated.\n" % path)
            sys.exit(1)
    except:
        sys.exit('fail to load config from file: %s' % path)
Example #6
File: run.py Project: json9666/DDNS
def get_config(key=None, default=None, path="config.json"):
    """
    读取配置
    """
    if not hasattr(get_config, "config"):
        try:
            with open(path) as configfile:
                get_config.config = loadjson(configfile)
                get_config.time = stat(path).st_mtime
        except IOError:
            error(' Config file `%s` does not exist!' % path)
            with open(path, 'w') as configfile:
                configure = {
                    "$schema": "https://ddns.newfuture.cc/schema/v2.8.json",
                    "id": "YOUR ID or EMAIL for DNS Provider",
                    "token": "YOUR TOKEN or KEY for DNS Provider",
                    "dns": "dnspod",
                    "ipv4": ["newfuture.cc", "ddns.newfuture.cc"],
                    "ipv6": ["newfuture.cc", "ipv6.ddns.newfuture.cc"],
                    "index4": "default",
                    "index6": "default",
                    "ttl": None,
                    "proxy": None,
                    "debug": False,
                }
                dumpjson(configure, configfile, indent=2, sort_keys=True)
            sys.stdout.write(
                "New template configure file `%s` is generated.\n" % path)
            sys.exit(1)
        except:
            sys.exit('fail to load config from file: %s' % path)
    if key:
        return get_config.config.get(key, default)
    else:
        return get_config.config
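Because the parsed file is cached on the function object, repeated lookups reuse a single read. A hypothetical call site:

dns_provider = get_config("dns")   # e.g. "dnspod"
full_config = get_config()         # the whole configuration dict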
Example #7
def write_production_config(environ_json):
    production_file = 'application/production.py'
    with open(environ_json, 'r') as envfp, cd('~/charger-web-backend'):
        environ = loadjson(envfp)
        for k, v in environ.items():
            run(r'''sed -i -e 's/^\(%s *=.*\)$/# \1/' "%s"''' %
                (k, production_file))
            run(r'''echo "%s = %s" >> "%s"''' %
                (k, repr(v).replace('"', r'\"'), production_file))
Example #8
def _parseJsonList(manager, response, context, path):
    fileList = []
    jsonResponse = loadjson(response)
    for name, jsonData in list(jsonResponse.items()):
        checksum = jsonData["checksum"]
        timestampString = jsonData["timestamp"]
        timestamp = dateutil.parser.parse(timestampString)
        newpath = urljoin(path, name)
        fileList.append(LocalizationFile(manager, context, newpath, checksum, timestamp))
    return fileList
Example #9
def _parseJsonList(manager, response, context, path):
    fileList = []
    jsonResponse = loadjson(response)
    for name, jsonData in list(jsonResponse.items()):
        checksum = jsonData["checksum"]
        timestampString = jsonData["timestamp"]
        timestamp = dateutil.parser.parse(timestampString)
        newpath = urljoin(path, name)
        fileList.append(
            LocalizationFile(manager, context, newpath, checksum, timestamp))
    return fileList
Example #10
def ModelGrabber(contrasts_file, events_file, confounds_file):

    from os import environ
    import numpy as np
    import pandas as pd
    from nipype.interfaces.base import Bunch
    from os.path import join as opj
    from json import load as loadjson

    # Project dir
    project = 'OPUS'
    project_dir = opj(environ['PI_SCRATCH'], project, 'BIDS_data')

    ### Load data ###
    read_tsv = lambda f: pd.read_csv(opj(project_dir, f), sep='\t', index_col=None)
    model = read_tsv(events_file)
    all_confounds = read_tsv(confounds_file)

    ### Confounds ###
    confound_names = ['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z',
                      'global_signal', 'framewise_displacement',
                      'a_comp_cor_00', 'a_comp_cor_01', 'a_comp_cor_02',
                      'a_comp_cor_03', 'a_comp_cor_04', 'a_comp_cor_05']
    confounds_df = all_confounds.loc[:,confound_names]
    confounds_df.framewise_displacement = confounds_df.framewise_displacement.fillna(0)
    confounds_dict = confounds_df.to_dict('list')

    # Convert confounds to dictionary
    confounds = {'regressor_names': confound_names,
                'regressors': [confounds_dict[name] for name in confound_names]}

    ### Model specification ###
    modelspec_dict = model.copy().rename(index=str,
                                         columns={'trial_type': 'conditions',
                                                  'onset': 'onsets',
                                                  'duration': 'durations',
                                                  'amplitude': 'amplitudes'})
    modelspec_dict = modelspec_dict.groupby('conditions').aggregate(lambda g: list(g)).reset_index().to_dict('list')
    modelspec_dict.update(confounds)
    if 'amplitudes' in modelspec_dict:
        modelspec_dict['amplitudes'] = [a if not all(np.isnan(a)) else np.ones(np.size(a)) for a in modelspec_dict['amplitudes']]
    modelspec = Bunch(**modelspec_dict)
    
    ### Contrasts ###
    with open(opj(project_dir, contrasts_file), 'r') as contrast_handle:
        contrasts = loadjson(contrast_handle)

    return modelspec, contrasts
Example #11
def chat():
	if 'accesskey' not in session:
		if 'error_reason' in request.args:
			return 'You must login via Facebook to use our chat!'
		elif 'code' in request.args:
			resp = ''
			with urlopen('https://graph.facebook.com/v2.3/oauth/access_token?client_id=%s&redirect_uri=http://wtc.codeguild.co/chat&client_secret=%s&code=%s' % (fb_appid, fb_secret, request.args['code'])) as r:
				resp = r.read()
			j = loadjson(resp.decode("utf-8"))
			if 'access_token' in j:
				session['accesskey'] = j['access_token']
				return redirect('/chat')
			else:
				return 'An error has occurred, please try again later'
		else:
			return redirect('https://www.facebook.com/dialog/oauth?client_id=%s&redirect_uri=http://wtc.codeguild.co/chat&response_type=code' % fb_appid)
	return render_template('chat.html')
Example #12
def connect():
	if 'accesskey' not in session:
		return False
	resp = ''
	with urlopen('https://graph.facebook.com/v2.3/me?client_id=%s&client_secret=%s&access_token=%s' % (fb_appid, fb_secret, session['accesskey'])) as r:
		resp = r.read()
	j = loadjson(resp.decode("utf-8"))
	session['displayname'] = j['name']
	emit('message', {
		'room': 'broadcast',
		'msg': 'Welcome to Will Coates\' Chat',
		'role': 'notice'
	})
	emit('message', {
		'room': 'willcoates',
		'msg': escape('%s has joined the chat!' % session['displayname']),
		'role': 'notice'
	}, broadcast=True)
Example #13
    def from_profile(cls, name: str, path: Optional[Path] = None) -> "Client":
        """
        Attempts to create a :py:class:`Client` instance for the given profile name.

        :param name: Profile name.
        :param path: Optional path to the configuration file (default is ``~/.decaf.json``).
        :return: A :py:class:`Client` instance.
        :raises FileNotFoundError: If the configuration file is not found.
        :raises KeyError: If the profile is not found.
        """
        ## If we don't have a configuration path, use the default:
        if path is None:
            path = Path.home() / ".decaf.json"

        ## Attempt to read in the configuration:
        with path.open() as ifile:
            profile = {p["name"]: p for p in loadjson(ifile)["profiles"]}[name]

        ## Build the client and return:
        return Client(profile["url"], APIKeyAuthorization(key=profile["key"], scr=profile["secret"]))
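The lookup above implies that ~/.decaf.json holds a top-level "profiles" array whose entries carry name, url, key, and secret fields. A hypothetical call site (the profile name is illustrative only):

# Assumed file shape:
# {"profiles": [{"name": "prod", "url": "...", "key": "...", "secret": "..."}]}
client = Client.from_profile("prod")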
Example #14
def updateDomainRecord(recordID, ip, RR, Typekey='A'):
    p1 = deepcopy(params)
    t = time.gmtime()
    p1.update(
        dict(Action='UpdateDomainRecord',
             RecordId='%s' % recordID,
             RR='%s' % RR,
             Type='%s' % Typekey,
             Value='%s' % ip,
             TTL=600,
             Line='default',
             Timestamp=time.strftime('%Y-%m-%dT%H:%M:%SZ', t),
             SignatureNonce=str(uuid1())))

    if Typekey == 'mx':
        p1['Priority'] = 5

    p1['Signature'] = sign(p1)
    rurl = url + '/?' + urlencode(p1)
    logging.info(rurl)

    try:
        getjson = urlopen(rurl).read().decode('utf-8')
        logging.debug(getjson)
    except Exception as e:
        logging.error(str(e))
        logging.error('Failed to update the record: the request failed.')
        return False

    json = loadjson(getjson)
    try:
        if json['Code']:
            logging.error(
                    '[' + json['Code'] + \
                    '] Error occurred while updating the record.'
                    )
            return False
    except Exception as e:
        logging.info('Update successful!')
        return True
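Both this example and Example #17 call an undefined sign() helper. A sketch of an Aliyun-style HMAC-SHA1 request signature that would fit the call sites, based on the public API documentation rather than the project's code (Python 2, matching the snippet above):

import base64
import hashlib
import hmac
from urllib import quote  # urllib.parse.quote on Python 3

def sign(p, secret='YOUR_ACCESS_SECRET'):
    def encode(s):
        # Percent-encode per RFC 3986; Aliyun keeps '~' unescaped.
        return quote(str(s), safe='~')
    canonical = '&'.join('%s=%s' % (encode(k), encode(v))
                         for k, v in sorted(p.items()))
    to_sign = 'GET&%2F&' + encode(canonical)
    return base64.b64encode(hmac.new(secret + '&', to_sign, hashlib.sha1).digest())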
Example #15
 def send_scrapy_query(self, action, arguments=None):
     url = "%s%s.json" % (self.scrapyd, action)
     method = "POST"
     headers = None
     if action.startswith('list'):
         method = "GET"
         if arguments:
             args = [str(k)+'='+str(v) for (k, v) in arguments.iteritems()]
             url += '?' + '&'.join(args)
             arguments = None
     elif arguments:
         arguments = urlencode(arguments)
         headers = {'Content-Type': 'application/x-www-form-urlencoded'}
     try:
         res = yield getPage(url, method=method, postdata=arguments, \
           headers=headers, timeout=30)
         result = loadjson(res)
         returnD(result)
     except ConnectionRefusedError:
         returnD(format_error("Could not contact scrapyd server, " + \
           "maybe it's not started..."))
     except Exception as e:
         returnD(format_error(e))
Example #16
async def queue_downloads(url):
    title = urlparse(url).query.split('volumeNo=')[1].split('&')[0]
    desired_path = Path.cwd() / title
    desired_path.mkdir(parents=False, exist_ok=True)
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0'
    }
    async with aiohttp.ClientSession(headers=headers) as session:
        async with session.get(url) as response:
            text = await response.text()
            for item in text.split("data-linkdata=\'")[1:]:
                linkdata = loadjson(item.split("\'>\n")[0])
                if 'src' in linkdata:
                    picture_url = linkdata['src']
                    picture_id = unquote(
                        urlparse(picture_url).path.split('/')[-1])
                    picture_name = re.sub('[<>:\"/|?*]', ' ',
                                          picture_id).strip()
                    picture_path = desired_path / picture_name
                    if not picture_path.is_file():
                        await download(session, picture_url, picture_path)
                else:
                    print(f"Error string does not include 'src' {linkdata}")
Example #17
def getDomainList(RRKey='', TypeKey='A', ValueKey=''):
    global DomainName
    p1 = deepcopy(params)
    t = time.gmtime()
    p1.update(
        dict(Action='DescribeDomainRecords',
             DomainName='%s' % topDomainName(DomainName),
             PageNumber=1,
             PageSize=500,
             RRKeyWord='%s' % RRKey,
             TypeKeyWord='%s' % TypeKey,
             ValueKeyWord='%s' % ValueKey,
             Timestamp=time.strftime('%Y-%m-%dT%H:%M:%SZ', t),
             SignatureNonce=str(uuid1())))
    p1.update(dict(Signature=sign(p1)))
    rurl = url + '/?' + urlencode(p1)

    try:
        getjson = urlopen(rurl).read().decode('utf-8')
        logging.debug(getjson)
    except Exception as e:
        logging.error(
                '[' + str(e) + \
                '] Failed to fetch the record list: the request failed.')
        return {}

    domainlist = dict()
    json = loadjson(getjson)
    try:
        for record in json['DomainRecords']['Record']:
            domainlist[record['RR']] = [record['RecordId'], record['Value']]
    except Exception as e:
        logging.error('[' + str(e) + '] Empty Domain List!')
        return {}

    return domainlist
Example #18
def genNicoVideoPara(data: dict) -> dict:
    r = {}
    delivery = data['media']['delivery']
    movie = delivery['movie']
    availableVideos = [
        video for video in movie['videos'] if video['isAvailable'] is True
    ]
    availableAudios = [
        audio for audio in movie['audios'] if audio['isAvailable'] is True
    ]
    session = movie["session"]
    token = loadjson(session['token'])
    r['recipe_id'] = session['recipeId']
    r['content_id'] = token['content_ids'][0]
    r['content_type'] = "movie"
    contsrc = []
    vinfoids = [i['id'] for i in availableVideos]
    for vinfo in availableVideos:
        vsrcs = vinfoids[vinfoids.index(vinfo['id']):]
        asrcs = [
            findAudioIndex(
                availableAudios,
                vinfo['metadata']['recommendedHighestAudioLevelIndex'])
        ]
        contsrc.append({
            "src_id_to_mux": {
                "video_src_ids": vsrcs,
                "audio_src_ids": asrcs
            }
        })
    r['content_src_id_sets'] = [{"content_src_ids": contsrc}]
    r['timing_constraint'] = 'unlimited'
    r['keep_method'] = {"heartbeat": {"lifetime": token['heartbeat_lifetime']}}
    r['protocol'] = {
        "name": token['protocols'][0]['name'],
        "parameters": {
            "http_parameters": {
                "parameters": {
                    "hls_parameters": {
                        "use_well_known_port": "yes",
                        "use_ssl": "yes",
                        "transfer_preset": "",
                        "segment_duration": 6000
                    }
                }
            }
        }
    }
    r['content_uri'] = ""
    r['session_operation_auth'] = {
        "session_operation_auth_by_signature": {
            "token": session['token'],
            "signature": session["signature"]
        }
    }
    r['content_auth'] = {
        "auth_type": token['protocols'][0]['auth_type'],
        "content_key_timeout": token['content_key_timeout'],
        "service_id": token['service_id'],
        "service_user_id": token["service_user_id"]
    }
    r['client_info'] = {"player_id": token["player_id"]}
    r['priority'] = round(token['priority'], 1)
    return {"session": r}
Example #19
    channel = 3
    batch_size = 16
    nb_epoch = 15
    img_rows, img_cols = 224, 224  # Resolution of inputs

    setname = sys.argv[1]
    threshold = sys.argv[2]
    step = sys.argv[3]
    if len(sys.argv) <= 4:
        root_dir = "/media/intel/m2/train_test_data"
    else:
        root_dir = sys.argv[4]
    tr_test_path = os.path.join(root_dir, step, setname, threshold, 'labels')
    with open(os.path.join(tr_test_path, 'class_name.json')) as fp:
        class_name = loadjson(fp)

    (num_classes, train_size, test_size, train_batches_count,
     test_batches_count,
     Y_test) = load_batch_data.statistics_precal(tr_test_path,
                                                 batch_size=batch_size)

    train_generator = load_batch_data.load_data(tr_test_path,
                                                target_size=(img_rows,
                                                             img_cols),
                                                data_type='train',
                                                num_classes=num_classes,
                                                batch_size=batch_size,
                                                shuffle=True)

    # Load fine-tuned model if there is one
Example #20
def parse_json_in_str(data):
    # parse json and convert everything from unicode to str
    for st in ["\n", "\t", "\r"]:
        data = data.replace(st, "")
    return loadjson(data, object_hook=_decode_dict)
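_decode_dict is not shown; the object_hook argument makes the parser pass every decoded JSON object through it. A typical Python 2 hook of this kind (an assumption, not the project's code) converts unicode keys and values back to str:

def _decode_dict(d):
    out = {}
    for k, v in d.iteritems():
        if isinstance(k, unicode):
            k = k.encode('utf-8')
        if isinstance(v, unicode):
            v = v.encode('utf-8')
        out[k] = v
    return out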
Example #21
def load_settings(process_markers=True):
    # first get the path of the settings file
    settings_filepath, settings_filename = find_settings_file()
    
    if settings_filepath:
        with open(join(settings_filepath, settings_filename)) as file:
            settings = loadjson(file)
            logging.info("settings file loaded.")
            settings['project_base_dir'] = settings_filepath
    else:
        logging.info("no settings file found.")
        # Fall back to an empty dict so the return below doesn't raise NameError.
        settings = {}
    
#    settings = loadjson(filepath)

    markers = {'hauptmaschine': '_home',
               'hero\d*': '_hero',
               'mpc.*': '_hero',
               'vxs\d*': '_hero'}
               
    if process_markers:
        # process "special" entries (ending on "_hero"/"_work")
        remaining_items = settings.items()
        keys_to_pop = []
        new_entries = {}
        for index, (key, value) in enumerate(remaining_items):
            if key in keys_to_pop:
                continue
    
            b_markers_found = [False] * len(markers)
            
            if key.endswith(tuple(markers.values())):
                # the current key ends with one of the markers
    
                # try to find all markers
                for idx_marker, (marker_name, marker) in enumerate(markers.items()):
                    for key2, value2 in remaining_items:
                        pos_marker_start = key2.find(marker)
    
                        if pos_marker_start != -1:
                            b_markers_found[idx_marker] = True
    
                            break
    
                    if all(b_markers_found):
                        break
    
                if all(b_markers_found):
                    # find which marker we have
                    for idx_marker, (marker_name, marker) in enumerate(markers.items()):
                        pos_marker_start = key.find(marker)
    
                        if pos_marker_start != -1:
                            # found_marker = marker
    
                            basename = key[:pos_marker_start]
    
                    # pick the value from the settings list
                    # 1. find the marker that applies here
                    b_match = False
                    for marker_key, marker_value in markers.items():
                        b_match = re.match(marker_key + '$', gethostname())
                        if b_match:
                            break
                        
                    if b_match:
                        new_key = basename
                        new_value = settings[basename + marker_value]
        
                        new_entries[new_key] = new_value
        
                        for key_to_pop in [basename + marker for marker in set(markers.values())]:
                            keys_to_pop.append(key_to_pop)
                    else:
                        logging.info("marker \"" + basename + "\" seems to be valid, but none of the hostnames (" + str(markers.keys()) + ") can be found.")
                        logging.info("(the current hostname is \"" + gethostname() + "\".")
                        
        for key_to_pop in keys_to_pop:
            settings.pop(key_to_pop)
    
        # add new entries
        for new_key, new_value in new_entries.items():
            settings[new_key] = new_value

    return settings
Example #22
def getConfig(names = 'config/*.json'):
	result = {}
	for name in glob(names):
		with open(name) as file:
			result = merge(result, loadjson(file))
	return result
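merge is undefined in the excerpt. A minimal recursive dict merge matching the call site, assuming later config files should override earlier ones key by key:

def merge(base, extra):
    # Hypothetical helper: deep-merge nested dicts; extra wins on conflicts.
    for key, value in extra.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            merge(base[key], value)
        else:
            base[key] = value
    return base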
Example #23
def alex_tr_pred(setname='Agora',
                 threshold='10',
                 step_='pseudo_pairing',
                 root_dir='/media/intel/m2/train_test_data/',
                 tmp_file='pretrained_models/Alexnet_tmpfile'):

    # Loading class names
    tr_test_path = os.path.join(root_dir, step_, setname, str(threshold),
                                'labels')
    with open(os.path.join(tr_test_path, 'class_name.json')) as fp:
        class_name = loadjson(fp)

    # Path to the textfiles for the training and testing set
    train_file = os.path.join(tr_test_path, 'train.txt')
    test_file = os.path.join(tr_test_path, 'test.txt')

    # Learning params
    learning_rate = 0.01
    num_epochs = 30
    batch_size = 128

    # Network params
    dropout_rate = 0.5
    num_classes = len(class_name)
    train_layers = ['fc8', 'fc7', 'fc6']

    # How often we want to write the tf.summary data to disk
    display_step = 20

    # Path for tf.summary.FileWriter and to store model checkpoints
    filewriter_path = os.path.join(tmp_file, "tensorboard")
    checkpoint_path = os.path.join(tmp_file, "checkpoints")

    try:
        os.makedirs(filewriter_path)
    except:
        pass
    try:
        os.makedirs(checkpoint_path)
    except:
        pass
    """
    Main Part of the finetuning Script.
    """

    # Create parent path if it doesn't exist
    if not os.path.isdir(checkpoint_path):
        os.mkdir(checkpoint_path)

    # Place data loading and preprocessing on the cpu
    with tf.device('/cpu:0'):
        tr_data = ImageDataGenerator(train_file,
                                     mode='training',
                                     batch_size=batch_size,
                                     num_classes=num_classes,
                                     shuffle=True)
        test_data = ImageDataGenerator(test_file,
                                       mode='inference',
                                       batch_size=batch_size,
                                       num_classes=num_classes,
                                       shuffle=True)

        # create a reinitializable iterator given the dataset structure
        iterator = Iterator.from_structure(tr_data.data.output_types,
                                           tr_data.data.output_shapes)
        next_batch = iterator.get_next()

    # Ops for initializing the two different iterators
    training_init_op = iterator.make_initializer(tr_data.data)
    test_init_op = iterator.make_initializer(test_data.data)

    # TF placeholder for graph input and output
    x = tf.placeholder(tf.float32, [batch_size, 227, 227, 3])
    y = tf.placeholder(tf.float32, [batch_size, num_classes])
    keep_prob = tf.placeholder(tf.float32)

    # Initialize model
    model = AlexNet(x, keep_prob, num_classes, train_layers)

    # Link variable to model output
    score = model.fc8
    softmax = tf.nn.softmax(score)
    # List of trainable variables of the layers we want to train
    var_list = [
        v for v in tf.trainable_variables()
        if v.name.split('/')[0] in train_layers
    ]

    # Op for calculating the loss
    with tf.name_scope("cross_ent"):
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=score, labels=y))

    # Train op
    with tf.name_scope("train"):
        # Get gradients of all trainable variables
        gradients = tf.gradients(loss, var_list)
        gradients = list(zip(gradients, var_list))

        # Create optimizer and apply gradient descent to the trainable variables
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        train_op = optimizer.apply_gradients(grads_and_vars=gradients)

    # Add gradients to summary
    for gradient, var in gradients:
        tf.summary.histogram(var.name + '/gradient', gradient)

    # Add the variables we train to the summary
    for var in var_list:
        tf.summary.histogram(var.name, var)

    # Add the loss to summary
    tf.summary.scalar('cross_entropy', loss)

    # Merge all summaries together
    merged_summary = tf.summary.merge_all()

    # Initialize the FileWriter
    writer = tf.summary.FileWriter(filewriter_path)

    # Initialize a saver to store model checkpoints
    saver = tf.train.Saver()

    # Get the number of training/validation steps per epoch
    train_batches_per_epoch = int(np.floor(tr_data.data_size / batch_size))
    test_batches_per_epoch = int(np.ceil(test_data.data_size / batch_size))

    # Start Session
    sess = tf.Session()

    sess.run(tf.global_variables_initializer())

    # Add the model graph to TensorBoard
    writer.add_graph(sess.graph)

    # Load the pretrained weights into the non-trainable layer
    model.load_initial_weights(sess)

    print("{} Start training...".format(datetime.now()))
    print("{} Open Tensorboard at --logdir {}".format(datetime.now(),
                                                      filewriter_path))

    # Loop over number of epochs
    for epoch in range(num_epochs):

        print("Epoch number: %d" % (epoch + 1))

        # Initialize iterator with the training dataset
        sess.run(training_init_op)

        for step in range(train_batches_per_epoch):

            # get next batch of data
            img_batch, label_batch = sess.run(next_batch)

            # And run the training op
            sess.run(train_op,
                     feed_dict={
                         x: img_batch,
                         y: label_batch,
                         keep_prob: dropout_rate
                     })

            # Generate summary with the current batch of data and write to file
            if step % display_step == 0:
                s = sess.run(merged_summary,
                             feed_dict={
                                 x: img_batch,
                                 y: label_batch,
                                 keep_prob: 1.
                             })

                writer.add_summary(s, epoch * train_batches_per_epoch + step)

        print("Saving checkpoint of model")

        # save checkpoint of the model
        checkpoint_name = os.path.join(
            checkpoint_path, 'model_epoch' + str(epoch + 1) + '.ckpt')
        save_path = saver.save(sess, checkpoint_name)

        print("Checkpoint saved at {}".format(datetime.now(), checkpoint_name))
    # Prediction

    final_pred = {}
    sess.run(test_init_op)
    for j in range(test_batches_per_epoch):
        img_batch, label_batch = sess.run(next_batch)
        if j == 0:
            _temp_img_batch, _temp_label_batch = img_batch, label_batch
        elif j == test_batches_per_epoch - 1:
            img_batch = np.concatenate((img_batch, _temp_img_batch),
                                       axis=0)[:batch_size]
        probs = sess.run(softmax, feed_dict={
            x: img_batch,
            keep_prob: 1
        })[:len(label_batch)]
        for i in range(len(label_batch)):
            img_class = list(label_batch[i]).index(1.)
            if img_class not in final_pred:
                final_pred[img_class] = []
            final_pred[img_class].append(list(probs[i]))

    final_pred_path = os.path.join(tr_test_path, '../final_pred', 'AlexNet')
    try:
        os.makedirs(final_pred_path)
    except:
        pass
    with open(os.path.join(final_pred_path, 'prob.plk'), 'wb') as fp:
        pickle.dump(final_pred, fp)

    test_ct = len(final_pred.keys())
    corr_ct = 0.
    for k in final_pred.keys():
        pred_class = np.argmax(np.array(final_pred[k]).mean(axis=0))
        if k == pred_class:
            corr_ct += 1
        else:
            print("%s <xxx> %s" %
                  (class_name[str(k)], class_name[str(pred_class)]))
    print(test_ct, int(corr_ct), corr_ct * 100. / test_ct)

    sess.close()