Example #1
    def __init__(self, FLAGS):

        self.FLAGS = FLAGS
        self.logger = get_logger(os.path.join(self.FLAGS.model_dir, "log.txt"))
        self.session = None
        self.saver = None

        self.overwrite_hyperparams()
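Every example in this listing obtains its logger through logger_utils.get_logger, called with anywhere from no arguments up to a log-file path, a logger name, and a level (one script even passes its whole argparse namespace). A minimal sketch of a compatible helper, built only on the standard logging module and assuming nothing about the real logger_utils in each project:

import logging
import os


def get_logger(log_file=None, logger_name=None, level=logging.INFO):
    # Hypothetical helper: the real logger_utils differs from project to project.
    logger = logging.getLogger(logger_name or log_file or __name__)
    if logger.handlers:
        # Already configured once; hand back the same logger.
        return logger
    logger.setLevel(level)
    formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
    console = logging.StreamHandler()
    console.setFormatter(formatter)
    logger.addHandler(console)
    if log_file:
        # Optional file output, creating the parent directory if needed.
        os.makedirs(os.path.dirname(log_file) or ".", exist_ok=True)
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger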
Example #2
from motion_python_api import take_snapshot
from logger_utils import get_logger
import yaml
import shutil
import os
from time import sleep

config = yaml.safe_load(open("config.yml"))
logger = get_logger()

def take_picture(job='test', num=0):
    ## NEED TO DO:
    # Check Vibration
    # Median Stacking
    # Start up motion server if it isn't running
    # Check to make sure photo is acceptable
    location = config['picture_directory'].format(job)
    take_snapshot()
    if not os.path.exists(location):
        os.makedirs(location)
    try:
        shutil.copy('/tmp/motion/lastsnap.jpg', location + '/{}_{}.jpg'.format(job, num))
        logger.info('Took a picture')
    except FileNotFoundError:
        sleep(.2)
        try:
            shutil.copy('/tmp/motion/lastsnap.jpg', location + '/{}_{}.jpg'.format(job, num))
            logger.info('Took a picture')
        except FileNotFoundError:
            logger.error('Failed to take picture')
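The nested try/except above retries the copy exactly once by duplicating the call. A loop-based variant of the same idea, sketched here as a hypothetical copy_snapshot helper under the same assumption about motion's snapshot path (not the project's actual code):

import os
import shutil
from time import sleep

from logger_utils import get_logger

logger = get_logger()


def copy_snapshot(location, job, num, retries=2, delay=0.2):
    # Hypothetical helper: retry copying motion's last snapshot a few times
    # before giving up, instead of duplicating the try/except block.
    destination = os.path.join(location, '{}_{}.jpg'.format(job, num))
    for _ in range(retries):
        try:
            shutil.copy('/tmp/motion/lastsnap.jpg', destination)
            logger.info('Took a picture')
            return True
        except FileNotFoundError:
            sleep(delay)
    logger.error('Failed to take picture after %d attempts', retries)
    return False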
Example #3
#!/usr/bin/env python3
# Uploader Server Program
import logger_utils
import ftplib
import os
import requests
from yaml import safe_load
from time import sleep
from multiprocessing import Pool, Value
from polling_server import polling_server

config = safe_load(open("config.yml"))
logger = logger_utils.get_logger()


def upload_process(args):

    # Counter is a multiprocessing Value used to keep track of uploading progress
    global counter

    files = args[0]
    job = args[1]
    # print('I am an upload process!!')
    # print('my files are:{}'.format(files))
    ip = config['remote_ip']
    user = config['remote_user']
    passwd = config['remote_passwd']
    # print('Starting my ftp client')
    ftp = ftplib.FTP(ip)
    ftp.login(user, passwd)
    # print('ftp client started!!')
Example #4
# -*- coding: utf-8 -*-

import re
from datetime import date

from apps.common.models import BPAUniqueID, BPAProject
import logger_utils


BPA_ID = "102.100.100"
INGEST_NOTE = "Ingested from GoogleDocs on {0}".format(date.today())

logger = logger_utils.get_logger(__name__)


def ingest_bpa_ids(data, project_key, project_name):
    """
    The BPA IDs are unique.
    """

    id_set = set()
    for e in data:
        if isinstance(e, dict):
            bpa_id = e['bpa_id'].strip()
        elif isinstance(e, tuple):
            if e.bpa_id is not None:
                bpa_id = e.bpa_id.strip()
            else:
                continue
        else:
            # Skip entries that are neither dicts nor tuple-like records
            continue
        if BPAIdValidator(bpa_id).is_valid():
            id_set.add(bpa_id)
Example #5
import pickle
import re
from datetime import datetime, timedelta
import time
from logging import INFO, DEBUG
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException

from logger_utils import get_logger

logger = get_logger('browser', INFO)

chrome_options = webdriver.ChromeOptions()
# chrome_options.add_argument('--proxy-server=socks5://3.12.34.63:7778')
chrome_options.add_argument('--window-size=1600,700')

allowed_ports = [
    'Donsak', 'Phangan', 'Samui', 'Tao', 'Chumpon', 'Chumphon', 'Surat Thani'
]  # Not used yet
allowed_ports_lower = [p.lower() for p in allowed_ports]

# Source names to parse
sources = ['Raja', 'Lomprayah', 'Songserm', 'Seatran']


def chrome_options_no_gui(no_gui=True):
    if no_gui:
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
Example #6
"""
@author: Gaivin Wang
@license: Apache Licence  
@contact: [email protected]
@site:  
@software: PyCharm 
@file: esutils.py 
@time: 5/13/2019 3:59 PM 
"""

from csv import DictReader
import datetime, os
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import re
from logger_utils import get_logger
logger = get_logger(logger_name="ESUtils")

class ESUtils():
    def __init__(self, hosts):
        self.es = Elasticsearch(hosts=hosts)

    def import_from_csv(self, csv_file, index_name, timestamp=None, filed_format=None, **addition_kwargs):
        if not os.path.exists(csv_file):
            logger.error("%s file not found" % csv_file)
            raise Exception("Cannot find the csv file %s" % csv_file)
        actions = []
        if not self.es.indices.exists(index=index_name, allow_no_indices=True):
            logger.warning("Index %s not found, creating it" % index_name)
            self.es.indices.create(index=index_name, body={}, ignore=400)
        if timestamp is None:
            timestamp = datetime.datetime.now()
Example #7
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with [email protected]

__author__ = 'jfernandez'


import uuid
from pkg_resources import resource_string
from logger_utils import get_logger
from constants import TRANSACTION_ID_PATTERN, RESOURCES_SAMPLEDATA_MODULE, RESOURCES_PARAMETER_PATTERN

logger = get_logger("utils")


def generate_transaction_id():
    """
    Generate a transaction ID value following the defined pattern.
    :return: New transactionId
    """

    return TRANSACTION_ID_PATTERN.format(uuid=uuid.uuid4())


def get_probe_data_from_resource_file(filename, replacement_values=None):
    """
    Get probe data from resource files. If replacement_values is not empty,
    parameter placeholders in the resource are replaced with those values.
    :param filename: Resource filename to be used for loading probe data
Example #8
#!/usr/bin/env python3
# Echo server program
import socket
import yaml
import logger_utils as utils
import json
from time import sleep
from sys import exit
from flask import Flask, send_from_directory
from flask_restful import reqparse, abort, Api, Resource
import logging

config = yaml.safe_load(open("config.yml"))
logger = utils.get_logger()
HOST = config['host']
PORTS = config['ports']

app = Flask(__name__)
api = Api(app)

parser = reqparse.RequestParser()
for i in config['parser_arguments']:
    parser.add_argument(i)

log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
### THIS IS ALL TRASH 
### Literally just a wrapper around Master. Should be done much better
### Ignore this file I'm going to try to replace it in the next few days.

def talk_to_master(args):
Example #9
# contact with [email protected]

__author__ = 'jfernandez'

import requests
import xmltodict
import xmldict
from json import JSONEncoder
from constants import HEADER_REPRESENTATION_JSON, HEADER_REPRESENTATION_XML, HTTP_VERB_POST, HTTP_VERB_DELETE, \
    HTTP_VERB_GET, HTTP_VERB_PUT, HTTP_VERB_UPDATE
from logger_utils import get_logger, log_print_request, log_print_response

API_ROOT_URL_ARG_NAME = 'api_root_url'
URL_ROOT_PATTERN = "{protocol}://{host}:{port}"

logger = get_logger("rest_client_utils")


class RestClient(object):

    api_root_url = None

    def __init__(self, protocol, host, port, resource=None):
        """
        Initialize the RestClient with a URL root pattern built from the specified params.
        :param protocol: Web protocol [HTTP | HTTPS] (string)
        :param host: Hostname or IP (string)
        :param port: Service port (string)
        :param resource: Base URI resource, if exists (string)
        :return: None
        """
Example #10
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with [email protected]


import requests
import xmltodict
import xmldict
from json import JSONEncoder
from logger_utils import get_logger, log_print_request, log_print_response

requests.packages.urllib3.disable_warnings()
logger = get_logger(__name__)


# HEADERS
HEADER_CONTENT_TYPE = u'content-type'
HEADER_ACCEPT = u'accept'
HEADER_REPRESENTATION_JSON = u'application/json'
HEADER_REPRESENTATION_XML = u'application/xml'
HEADER_REPRESENTATION_TEXTPLAIN = u'text/plain'
HEADER_AUTH_TOKEN = u'X-Auth-Token'
HEADER_TENANT_ID = u'Tenant-Id'
HEADER_TRANSACTION_ID = u'txid'

# HTTP VERBS
HTTP_VERB_POST = 'post'
HTTP_VERB_GET = 'get'
Example #11
# contact with [email protected]

__author__ = "Javier Fernández"
__email__ = "*****@*****.**"
__copyright__ = "Copyright 2015"
__license__ = " Apache License, Version 2.0"
__version__ = "1.0.0"


from logger_utils import get_logger
from fabric.api import env, hide, run, get
from fabric.tasks import execute
from fabric.contrib import files
from StringIO import StringIO

__logger__ = get_logger("qautils")

FABRIC_ASSERT_RESULT = u'<local-only>'


class FabricAssertions():

    @staticmethod
    def assert_file_exist(path):

        """
        Fabric assertion: Check if file exists on the current remote hosts.
        :param path (string): Absolute path to file

        :return (bool): True if given file exists on the current remote host (dir: PROVISION_ROOT_PATH).
        """
Example #12
    default=-1,
    help='Number of runs'
)

# =========================================

args = parser.parse_args()
DATA_SET = args.DATA_SET
num_runs = args.num_runs
num_jobs = args.num_jobs

if num_jobs == -1:
    num_jobs = num_runs

LOG_FILE = 'log_results_{}.txt'.format(DATA_SET)
LOGGER = logger_utils.get_logger(LOG_FILE, 'DAGMM')

LOGGER.info(DATA_SET)
config_file = 'config.yaml'
with open(config_file, 'r') as fh:
    config = yaml.safe_load(fh)

num_anomaly_sets = config[DATA_SET]['num_anomaly_sets']
anomaly_ratio = config[DATA_SET]['anomaly_ratio']
model_config = config[DATA_SET]['dagmm']

anom_perc = 100 * anomaly_ratio/(1+anomaly_ratio)
print('Anomaly percentage ', anom_perc)

K_values = [2,5]
K_vs_f1 = []
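The DAGMM runner above (and the DCN, OCSVM, and deepSVDD runners later in this listing) reads the same config.yaml layout. Loaded with yaml.safe_load, it has to yield a nested mapping roughly like the Python equivalent below; the keys are the ones actually accessed by these scripts, while the values are placeholders rather than the project's real settings:

config = {
    "kddcup": {                 # one block per DATA_SET choice
        "num_anomaly_sets": 5,  # placeholder value
        "anomaly_ratio": 0.1,   # placeholder value
        "dagmm": {},            # per-model hyperparameters, contents not shown above
        "dcn": {},
    },
}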
Example #13
parser.add_argument(
    '--objective',
    type=str,
    default='one-class',
    help='objective',
    choices=['one-class', 'soft-boundary']
)


# =========================================
args = parser.parse_args()
DATA_SET = args.DATA_SET
num_runs = args.num_runs
LOG_FILE = 'log_results_{}.txt'.format(DATA_SET)
LOGGER = logger_utils.get_logger(LOG_FILE, 'deepSVDD')
LOGGER.info(DATA_SET)
config_file = 'config.yaml'
anom_perc = args.anom_perc

with open(config_file, 'r') as fh:
    config = yaml.safe_load(fh)

num_anomaly_sets = config[DATA_SET]['num_anomaly_sets']
anomaly_ratio = config[DATA_SET]['anomaly_ratio']
anom_perc = 100 * anomaly_ratio/(1+anomaly_ratio)
step = 0.025
nu_values = np.arange(0.025,0.2+step,step)
nu_vs_auc = []
objective = args.objective
Example #14
import re
from datetime import datetime, timedelta
from bs4 import BeautifulSoup
import requests
from logging import INFO, DEBUG

from mygoogleapi import get_table, refresh_table
from browser import start_parse_raja, start_parse_lomp, allowed_ports_lower, clean_data, sources
from logger_utils import get_logger

logger = get_logger('parser', INFO)


# Extract the table for a specific source from the fetched Google sheet
def get_source_table(google_table, source):
    logger.debug('Extracting table for ' + source)
    headers = google_table[0]
    source_i = headers.index('source')
    pop_indexes = []
    source_table = []
    for i, row in enumerate(google_table):
        try:
            if row[source_i] == source:
                pop_indexes.append(i)
                source_table.append(row)
        except Exception:
            logger.exception(row)
    pop_indexes.reverse()
    for i in pop_indexes:
        google_table.pop(i)
    return source_table
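A hypothetical driver loop for the function above, assuming get_table (imported from mygoogleapi) returns the sheet as a list of rows with the header row first. Note that get_source_table pops the matched rows out of google_table, so successive calls partition the sheet between sources:

google_table = get_table()  # assumed to return [header_row, row, row, ...]
per_source_tables = {}
for src in sources:
    per_source_tables[src] = get_source_table(google_table, src)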
Example #15
        # ----- end iteration loop -----


if __name__ == '__main__':
    if not os.path.exists(config.log_dir):
        os.makedirs(config.log_dir)
    if not os.path.exists(config.model_path):
        os.makedirs(config.model_path)
    optimizer = tf.compat.v2.optimizers.Adam()
    # centralNet = DQSA(input_size=config.input_size_central, usernet=False)
    loss = tf.compat.v2.losses.mean_squared_error
    centralNet = DQSAVersion2(input_size=config.input_size_central,
                              usernet=False,
                              optimizer=optimizer,
                              loss=loss)
    # centralNet.define_loss(loss=loss)
    # centralNet.define_optimizer(optimizer=optimizer)
    logger = get_logger(os.path.join(config.log_dir, "train_log"))
    Tensorcallback = callbacks.TensorBoard(config.log_dir,
                                           write_graph=True,
                                           write_images=False)
    Tensorcallback.set_model(centralNet.model)
    callbacks_dict = {'tensorboard': Tensorcallback}  # avoid shadowing the imported callbacks module
    DQSATarget = DQSAVersion2(input_size=config.input_size_central,
                              usernet=False,
                              optimizer=optimizer,
                              loss=loss)
    # centralNet.load_weights(path=config.load_ckpt_path)
    # DQSATarget.load_weights(path=config.load_ckpt_path)
    trainDqsa(callbacks_dict, logger, centralNet, DQSATarget)
Example #16
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with [email protected]

import requests
import xmltodict
import xmldict
from json import JSONEncoder
from logger_utils import get_logger, log_print_request, log_print_response

requests.packages.urllib3.disable_warnings()
logger = get_logger("restClientUtils")

# HEADERS
HEADER_CONTENT_TYPE = u'content-type'
HEADER_ACCEPT = u'accept'
HEADER_REPRESENTATION_JSON = u'application/json'
HEADER_REPRESENTATION_XML = u'application/xml'
HEADER_REPRESENTATION_TEXTPLAIN = u'text/plain'
HEADER_AUTH_TOKEN = u'X-Auth-Token'
HEADER_TENANT_ID = u'Tenant-Id'
HEADER_TRANSACTION_ID = u'txid'

# HTTP VERBS
HTTP_VERB_POST = 'post'
HTTP_VERB_GET = 'get'
HTTP_VERB_PUT = 'put'
Example #17
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with [email protected]

import requests
import xmltodict
import xmldict
from json import JSONEncoder
from logger_utils import get_logger, log_print_request, log_print_response

requests.packages.urllib3.disable_warnings()
logger = get_logger(__name__)

# HEADERS
HEADER_CONTENT_TYPE = u'content-type'
HEADER_ACCEPT = u'accept'
HEADER_REPRESENTATION_JSON = u'application/json'
HEADER_REPRESENTATION_XML = u'application/xml'
HEADER_REPRESENTATION_TEXTPLAIN = u'text/plain'
HEADER_AUTH_TOKEN = u'X-Auth-Token'
HEADER_TENANT_ID = u'Tenant-Id'
HEADER_TRANSACTION_ID = u'txid'

# HTTP VERBS
HTTP_VERB_POST = 'post'
HTTP_VERB_GET = 'get'
HTTP_VERB_PUT = 'put'
Example #18
    default='kddcup',
    choices=['kddcup', 'kddcup_neptune', 'nsl_kdd', 'nb15', 'gureKDD'])

parser.add_argument('--anom_perc',
                    type=int,
                    help='Percentage of anomalies',
                    default=None)

parser.add_argument('--num_runs', type=int, default=1, help='Number of runs')

# =========================================
args = parser.parse_args()
DATA_SET = args.DATA_SET
num_runs = args.num_runs
LOG_FILE = 'log_results_{}.txt'.format(DATA_SET)
LOGGER = logger_utils.get_logger(LOG_FILE, 'OCSVM')
anom_perc = args.anom_perc

LOGGER.info(DATA_SET)
config_file = 'config.yaml'
with open(config_file, 'r') as fh:
    config = yaml.safe_load(fh)

num_anomaly_sets = config[DATA_SET]['num_anomaly_sets']
anomaly_ratio = config[DATA_SET]['anomaly_ratio']
if anom_perc is None:
    anom_perc = 100 * anomaly_ratio / (1 + anomaly_ratio)
LOGGER.info(' Setting anomaly percentage to {} %'.format(anom_perc))
nu_values = np.arange(0.1, 0.5 + 0.1, 0.10)
nu_vs_auc = []
for nu in nu_values:
Example #19
parser.add_argument("--xlsx",
                    type=bool,
                    default=False,
                    help="Output XLSX file of the statistic results")

parser.add_argument("--logging_level",
                    type=int,
                    default="20",
                    help="Logging level")
parser.add_argument("--logging_file",
                    type=str,
                    default="log.txt",
                    help="Logging file")

args = parser.parse_args()
logger = logger_utils.get_logger(args)


def read_traces(traces_path):
    with open(traces_path, 'r') as fp:
        _traces = json.load(fp)
    if isinstance(_traces, dict):
        traces = _traces.get("traceEvents")
    elif isinstance(_traces, list):
        traces = _traces
    else:
        raise ValueError(
            "The trace file does not follow the standard Chrome tracing format: "
            + traces_path)
    return traces
Example #20
parser.add_argument(
    '--DATA_SET',
    type=str,
    help=' Which data set ?',
    default='kddcup',
    choices=['kddcup', 'kddcup_neptune', 'nsl_kdd', 'nb15', 'gureKDD'])

parser.add_argument('--num_runs', type=int, default=10, help='Number of runs')

# =========================================

args = parser.parse_args()
DATA_SET = args.DATA_SET
num_runs = args.num_runs
LOG_FILE = 'log_results_{}.txt'.format(DATA_SET)
LOGGER = logger_utils.get_logger(LOG_FILE, 'DCN')

LOGGER.info(DATA_SET)
config_file = 'config.yaml'
with open(config_file, 'r') as fh:
    config = yaml.safe_load(fh)

num_anomaly_sets = config[DATA_SET]['num_anomaly_sets']
anomaly_ratio = config[DATA_SET]['anomaly_ratio']
model_config = config[DATA_SET]['dcn']

anom_perc = 100 * anomaly_ratio / (1 + anomaly_ratio)
step = 1
K_values = np.arange(5, 10 + step, step)
K_vs_auc = []
for K in K_values:
Example #21
# encoding: utf-8

"""
@version: v1.0
@author: Gaivin Wang
@license: Apache Licence
@contact: [email protected]
@site: https://github.com/gaivin/
@software: PyCharm
@file: ssh_utils.py
@time: 6/8/2018 3:43 PM
"""
import paramiko
from logger_utils import get_logger

logger = get_logger("SSH_Utils")


class SSHConnection(object):
    def __init__(self, host='192.168.2.103', port=22, username='******', password='******'):
        self.host = host
        self.port = port
        self.username = username
        self.password = password
        self._transport = None
        self._ssh_client = None

    def connect(self):
        transport = paramiko.Transport((self.host, self.port))
        try:
            transport.connect(username=self.username, password=self.password)
Example #22
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with [email protected]

__author__ = 'jfernandez'

import uuid
from pkg_resources import resource_string
from logger_utils import get_logger
from constants import CORRELATOR_PATTERN, RESOURCES_SAMPLEDATA_MODULE, RESOURCES_PARAMETER_PATTERN

logger = get_logger("utils")


def generate_correlator():
    """
    Generate a correlator value following the defined pattern.
    :return: New correlator
    """

    return CORRELATOR_PATTERN.format(uuid=uuid.uuid4())


def get_probe_data_from_resource_file(filename, replacement_values=None):
    """
    Get probe data from resource files. If replacement_values is not empty,
    parameter placeholders in the resource are replaced with those values.
    :param filename: Resource filename to be used for loading probe data
Example #23
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with [email protected]


import requests
import xmltodict
import xmldict
from json import JSONEncoder
from logger_utils import get_logger, log_print_request, log_print_response

requests.packages.urllib3.disable_warnings()
logger = get_logger("restClientUtils")


# HEADERS
HEADER_CONTENT_TYPE = u'content-type'
HEADER_ACCEPT = u'accept'
HEADER_REPRESENTATION_JSON = u'application/json'
HEADER_REPRESENTATION_XML = u'application/xml'
HEADER_REPRESENTATION_TEXTPLAIN = u'text/plain'
HEADER_AUTH_TOKEN = u'X-Auth-Token'
HEADER_TENANT_ID = u'Tenant-Id'
HEADER_TRANSACTION_ID = u'txid'

# HTTP VERBS
HTTP_VERB_POST = 'post'
HTTP_VERB_GET = 'get'
Example #24
                text="Users",
                font=dict(
                    family="Courier New, monospace",
                    size=18,
                    color="#7f7f7f"
                )
            )
        )
    )
    fig.show()


if __name__ == '__main__':
    userNet = DQSAVersion2(input_size=config.input_size_user, usernet=True)
    userNet.load_weights(path="/home/dorliv/Desktop/DQSAKeras/successful_experiements/three_users/network_central_best_three_users/checkpoint")
    logger = get_logger(os.path.join(config.evaluate_log_dir, "evaluate_log"))
    Tensorcallback = callbacks.TensorBoard(config.evaluate_log_dir,
                                           write_graph=True, write_images=False)
    Tensorcallback.set_model(userNet.model)
    env = OneTimeStepEnv()
    beta = 10
    alpha = 0  # e_greedy
    draw_heatmap_flag = False
    channelThroughPutPerTstep = initCTP(config.TimeSlotsEvaluate)  # init the data structure to view the mean reward at each t
    for iteration in range(5):
        channelThroughPutMean = 0
        loss_value = []
        collisonsMean = 0
        idle_timesMean = 0
        for episode in range(config.Episodes):
            heatmap = []
Example #25
#!/usr/bin/env python
# encoding: utf-8

import subprocess
import datetime
import os
import logger_utils
import fire
from random import randint, sample
from lvm2py import LVM

logger = logger_utils.get_logger("volume_utils")
volume_fill_logger = logger_utils.get_logger(logger_name="volume_fill_history")


def create_volume(volume_size,
                  mount_point=None,
                  fs_type="ext3",
                  vg="fsa",
                  volume_name=None):
    if volume_name is None:
        volume_name = "%s-%s" % (volume_size, fs_type)
    if mount_point is None:
        mount_point = "/lvm/%s-%s" % (vg, volume_name)
    if is_mount_point_available(mount_point):
        logger.info("Mount point %s is exist. Please remove it first." %
                    mount_point)
        return mount_point
    volume_path = get_lv_path(lv_name=volume_name, vg_name=vg)
    if not volume_path:
        volume_path = create_lv(volume_name=volume_name,
Example #26
def lra_framework(model: Model, lra_algorithm, x_train, x_test, y_test, dataset, model_name):
    scores = []
    arch = model_name.split('_')[0]
    dir = os.path.join(arch, model_name)
    if not os.path.exists(dir):
        os.makedirs(dir)
    logger_path = os.path.join(dir, 'log_file_model_{}'.format(model_name))
    if os.path.exists(logger_path + ".log"):
        os.remove(logger_path + ".log")
    logger = get_logger(logger_path)
    samples = x_train  # alternatively: np.concatenate((x_train, x_test))

    initial_score = model.evaluate(x_test, y_test, verbose=0)


    score = np.copy(initial_score)

    score_to_plot = []
    compression_ratio_to_plot = []

    score_to_plot.append(score[-1])
    compression_ratio_to_plot.append(0)

    opt = tf.keras.optimizers.Adam()

    temp_model = clone_model(model)
    temp_model.compile(
        loss=tf.keras.losses.categorical_crossentropy,
        optimizer=opt,
        metrics=['accuracy'])

    lra_model = clone_model(model)
    lra_model.set_weights(model.get_weights())
    lra_model.compile(
        loss=tf.keras.losses.categorical_crossentropy,
        optimizer=opt,
        metrics=['accuracy'])

    relevant_layers, relevant_layers_index_in_model = get_relevant_layers(model)
    initial_num_of_params = get_initial_number_of_params(relevant_layers)
    print('\n\n')
    it = 0

    while (initial_score[1] - score[1] < cfg.accuracy_tolerance):
        klds = []
        print("Start of Iteration {0}:".format(it))
        logger.info("Start of Iteration {0}:".format(it))

        curr_num_of_params = 0
        for i, layer_index in enumerate(relevant_layers_index_in_model):
            temp_model.set_weights(lra_model.get_weights())

            temp_model, _, _, num_of_params_layer_i = lra_per_layer(temp_model, layer_index=layer_index,
                                                                    algorithm=lra_algorithm, update_memory=False)
            curr_num_of_params += num_of_params_layer_i
            # kld_per_layer = evaluate_kld_for_each_layer(model, temp_model, samples)
            kld_per_layer = evaluate_kld_for_last_layer(model, temp_model, samples)

            print('{} ({}): KLD per layer {:.4f}'.format(relevant_layers[i].name, layer_index, kld_per_layer))
            # klds.append(sum(kld_per_layer))
            klds.append(kld_per_layer)

        print("Compression ratio : {}. \n number of params: \n \t lra_model {:,} \n \t initial model {:,}"
              .format(1 - curr_num_of_params / initial_num_of_params, curr_num_of_params, initial_num_of_params))
        logger.info("Compression ratio : {}.  number of params:  \t lra_model {:,} \t initial model {:,}"
              .format(1 - curr_num_of_params / initial_num_of_params, curr_num_of_params, initial_num_of_params))

        if curr_num_of_params < initial_num_of_params:
            score_to_plot.append(score[-1])
            compression_ratio_to_plot.append(1 - curr_num_of_params / initial_num_of_params)  # so the graph will start from 0 and go to 1
        min_kld_index = np.argmin(klds)

        layer_with_min_kld = relevant_layers[min_kld_index]
        layer_index_in_model_with_min_kld = relevant_layers_index_in_model[min_kld_index]

        # print('---------------- Start Compression with {0} for layer {1}!) ----------------'.format(lra_algorithm,
        #                                                                                             layer_with_min_kld.name))
        lra_model, truncated, full_svs, _ = lra_per_layer(lra_model, layer_index=layer_index_in_model_with_min_kld,
                                                   algorithm=lra_algorithm, update_memory=True)
        if 'tsvd' in lra_algorithm:
            print('Approximate {0} {1} using {2}/{3} singular values'.format(layer_with_min_kld.name,
                                                                             layer_index_in_model_with_min_kld,
                                                                             truncated, full_svs))
            logger.info('Approximate {0} {1} using {2}/{3} singular values'.format(layer_with_min_kld.name,
                                                                             layer_index_in_model_with_min_kld,
                                                                             truncated, full_svs))
        if 'rrqr' in lra_algorithm:
            print('Approximate {0} {1} using {2}/{3} rank ratio '.format(layer_with_min_kld.name,
                                                                             layer_index_in_model_with_min_kld,
                                                                             truncated, full_svs))
            logger.info('Approximate {0} {1} using {2}/{3} rank ratio'.format(layer_with_min_kld.name,
                                                                                   layer_index_in_model_with_min_kld,
                                                                                   truncated, full_svs))

        # print('---------------- Done Compression with {0} for layer {1}!) ----------------'.format(lra_algorithm,
        #                                                                                            layer_with_min_kld.name))

        score = lra_model.evaluate(x_test, y_test, verbose=0)

        scores.append(score)
        print("End of Iteration {}:\ntest loss = {:.3f}\ntest accuracy = {:.3f}\n\n\n".format(it,  score[0], score[1]))
        logger.info("End of Iteration {}:\ntest loss = {:.3f}\ntest accuracy = {:.3f}\n\n\n".format(it,  score[0], score[1]))
        it += 1

    save_model_path = os.path.join(dir, '{0}_{1}_lra.h5'.format(model_name, dataset))
    print('Saving model to: ', save_model_path)
    logger.info('Saving model to:  {}'.format(save_model_path))
    save_model(lra_model, save_model_path, include_optimizer=False, save_format='h5')
    score_to_plot = np.asarray(score_to_plot)
    compression_ratio_to_plot = np.asarray(compression_ratio_to_plot)
    np.save(os.path.join(dir, 'score_{}'.format(model_name)), score_to_plot)
    np.save(os.path.join(dir, 'compression_{}'.format(model_name)), compression_ratio_to_plot)
    plot_score_versus_compression(save_dir=dir, score_data=score_to_plot,
                                  compression_data=compression_ratio_to_plot, model_name=model_name, algo=lra_algorithm)
Example #27
"""
@author: Gaivin Wang
@license: Apache Licence
@contact: [email protected]
@site: https://github.com/gaivin/
@software: PyCharm
@file: pyrobot.py
@time: 5/13/2019 10:00 AM
"""

import subprocess
import os
import fire
import sys
from logger_utils import get_logger

logger = get_logger("pyrobot")


def robot(test, include=None, exclude=None, variable=None, debug=None, rerun_failed=False, robot_executor="pybot",
          rebot_executor="rebot", **robot_kwargs):
    output = "output.xml"
    outputdir = os.path.abspath(os.path.curdir)
    cmd = robot_executor
    if exclude:
        cmd += _generate_options(option_type="exclude", values=exclude)
    if variable:
        cmd += _generate_options(option_type="variable", values=variable)
    if include:
        cmd += _generate_options(option_type="include", values=include)
    if debug:
        cmd += _generate_options(option_type="debug", values=debug)
Example #28
# encoding: utf-8

import logger_utils
import fire
import volume_utils
import config

logger = logger_utils.get_logger("backup_excutor")


def create_volumes(volumes, fs="ext3"):
    for mount_point, size in volumes.items():
        volume_utils.create_volume(volume_size=size, mount_point=mount_point, fs_type=fs)
    return True


def fill(targets=config.VOLUMES_FILL_TARGET):
    for volume, target in targets.items():
        logger.info("Start Fill %s to %s..." % (volume, target))
        volume_utils.fill_volume(volume=volume, percentage=target)
        logger.info("Completed Fill %s to %s." % (volume, target))
    volume_utils.execute("df -h")


def create():
    do = raw_input('You are about to create new volumes from your configuration. Are you sure? (yes/[no]) ')
    if do == "yes":
        create_volumes(volumes=config.EXT3_VOLUMES, fs="ext3")
        create_volumes(volumes=config.EXT4_VOLUMES, fs="ext4")
        logger.info("Volumes creation completed.")
        lvs = volume_utils.list_lv()
Example #29
__author__ = 'jfernandez'


import requests
import xmltodict
import xmldict
from json import JSONEncoder
from constants import HEADER_REPRESENTATION_JSON, HEADER_REPRESENTATION_XML, HTTP_VERB_POST, HTTP_VERB_DELETE, \
    HTTP_VERB_GET, HTTP_VERB_PUT, HTTP_VERB_UPDATE
from logger_utils import get_logger, log_print_request, log_print_response

API_ROOT_URL_ARG_NAME = 'api_root_url'
URL_ROOT_PATTERN = "{protocol}://{host}:{port}"

logger = get_logger("rest_client_utils")


class RestClient(object):

    api_root_url = None

    def __init__(self, protocol, host, port, resource=None):
        """
        Initialize the RestClient with a URL root pattern built from the specified params.
        :param protocol: Web protocol [HTTP | HTTPS] (string)
        :param host: Hostname or IP (string)
        :param port: Service port (string)
        :param resource: Base URI resource, if exists (string)
        :return: None
        """
Example #30
__author__ = 'jfernandez'


from lettuce import world
from logger_utils import get_logger
import os
import sys
import json
from remote_tail_utils import RemoteTail
from constants import PROPERTIES_FILE, PROPERTIES_CONFIG_ENV, PROPERTIES_CONFIG_ENV_LOGS_PATH, \
    PROPERTIES_CONFIG_ENV_LOCAL_PATH_REMOTE_LOGS, MONITORING_CONFIG_SERVICE_PRIVATEKEY, \
    MONITORING_CONFIG_SERVICE_LOG_PATH, MONITORING_CONFIG_SERVICE_HOST, MONITORING_CONFIG_SERVICE_HOSTUSER, \
    MONITORING_CONFIG_SERVICE_ADAPTER, MONITORING_CONFIG_SERVICE_LOG_FILE_NAME

logger = get_logger("terrain_utils")


def _load_project_properties():
    """
    Parse the JSON configuration file located in the src folder and
    store the resulting dictionary in the lettuce world global variable.
    """

    logger.debug("Loading test properties")
    with open(PROPERTIES_FILE) as config_file:
        try:
            world.config = json.load(config_file)
        except Exception, e:
            logger.error('Error parsing config file: %s' % e)
            sys.exit(1)
Example #31
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with [email protected]

__author__ = 'jfernandez'


from paramiko.ssh_exception import SSHException
from sshtail import SSHTailer, load_dss_key, load_rsa_key
from logger_utils import get_logger
import time
import threading


logger = get_logger("remote_tail_utils")

# Delay period just after starting remote tailers
TIMER_DELAY_PERIOD = 3

# Grace period when stopping thread. 3 seconds by default
TIMER_GRACE_PERIOD = 3

# Global flag
_tail_terminate_flag = False


class RemoteTail:

    def __init__(self, remote_host_ip, remote_host_user, remote_log_path, remote_log_file_name, local_log_target,
                 private_key):
Example #32
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with [email protected]

__author__ = 'jfernandez'


from sshtail import SSHTailer, load_dss_key
import time
import threading
from logger_utils import get_logger


logger = get_logger("remote_tail_utils")

# Delay period just after starting remote tailers
TIMER_DELAY_PERIOD = 3

# Grace period when stopping thread. 3 seconds by default
TIMER_GRACE_PERIOD = 3

# Global flag
_tail_terminate_flag = False


class RemoteTail:

    def __init__(self, remote_host_ip, remote_host_user, remote_log_path, remote_log_file_name, local_log_target,
                 private_key):