Example #1
File: console.py  Project: john5223/usage
def console_report():
    """Runs a report from the cli."""

    args = parser.parse_args()
    conf = config.load(args.config_file)
    # Uppercase default so logger.setLevel accepts it when the flag is unrecognized.
    logger.setLevel(LOG_LEVELS.get(args.log_level.lower(), 'INFO'))

    manager = ClientManager(**conf.get('auth_kwargs', {}))
    ceilometer = manager.get_ceilometer()

    if args.mtd:
        start, stop = utils.mtd_range()
    elif args.today:
        start, stop = utils.today_range()
    elif args.last_hour:
        start, stop = utils.last_hour_range()
    else:
        # Default to month-to-date when no range flag is given.
        start, stop = utils.mtd_range()

    r = Report(
        ceilometer,
        args.definition_filename,
        args.csv_filename,
        start=start,
        stop=stop
    )
    r.run()
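The utils.*_range() helpers are project-specific; the call sites imply they return a (start, stop) pair of datetimes. A minimal sketch of a month-to-date helper under that assumption (this body is a guess, not the project's code):

import datetime


def mtd_range():
    """Sketch: (first of the current month, now), both UTC."""
    now = datetime.datetime.utcnow()
    start = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    return start, now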
Example #2
def test():
    logger.info("Starting test...")
    args = argparser.parse_args()
    conf = config.load(args.config_file)
    auth_kwargs = conf.get('auth_kwargs', {})
    clients = ClientManager(**auth_kwargs)
    volume_delete(clients, conf)
Example #3
def run():
    logger.info("Starting randomload...")
    actions = [
        server_create,
        server_delete,
        image_create,
        image_delete,
        volume_create,
        volume_delete
    ]
    args = argparser.parse_args()
    conf = config.load(args.config_file)
    interval = conf.get('interval', 60)

    clients = ClientManager(**conf.get('auth_kwargs', {}))

    last_action_time = 0
    while True:
        now = time.time()
        if now - last_action_time > interval:
            action = utils.randomfromlist(actions)
            try:
                action(clients, conf=conf)
            except Exception as e:
                print(e)
            last_action_time = time.time()
        time.sleep(1)
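utils.randomfromlist is a project helper; judging by the call site it behaves like random.choice. A minimal stand-in under that assumption:

import random


def randomfromlist(items):
    """Stand-in for utils.randomfromlist: pick one item uniformly at random."""
    return random.choice(items)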
Example #4
from args import parser, get_config_module
import hashlib
import hmac
import sys
import requests
import urllib

parser.add_argument("path", metavar="P", type=str, help="Path to query metadata of")
vals = parser.parse_args()
config = get_config_module(vals.config)
path = vals.path

try:
    from config import app_config
    key = app_config["PUBLISHER_SECRET_KEY"]
    root = app_config.get("APPLICATION_ROOT", "")
except (ImportError, KeyError):
    print("Unable to retrieve secret key for metadata request")
    sys.exit(1)

try:
    from config import host_config
    if "host" in host_config:
        host = host_config["host"]
    else:
        host = "127.0.0.1"

    if "port" in host_config:
        port = host_config["port"]
    else:
        port = 8000
except ImportError:
    # The snippet is truncated here; fall back to the same defaults as above.
    host = "127.0.0.1"
    port = 8000
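The script is cut off at this point. Given the hmac/hashlib imports and the PUBLISHER_SECRET_KEY, it presumably signs the path and queries a metadata endpoint; the sketch below is a guess at that shape, and the signature scheme, header name, and URL layout are all assumptions:

from urllib.parse import quote

# Hypothetical continuation: HMAC-sign the requested path and fetch it.
signature = hmac.new(key.encode(), path.encode(), hashlib.sha256).hexdigest()
url = "http://%s:%s%s/metadata/%s" % (host, port, root, quote(path))
response = requests.get(url, headers={"X-Signature": signature})
print(response.status_code)
print(response.text)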
Example #5
                # Save all tenant stats
                v, m, s = r.active_stats()
                print ("Active vcpus", v)
                print ("Active memory MB", m)
                print ("Active storage GB", s)
                all_tenant = Tenant.get_or_create(session, all_tenant_id)
                session.commit()  # ensure all_tenant.id is populated
                session.add(ActiveVCPUS(
                    value=v, time=now, tenant_id=all_tenant.id
                ))
                session.add(ActiveMemoryMB(
                    value=m, time=now, tenant_id=all_tenant.id
                ))
                session.add(ActiveLocalStorageGB(
                    value=s, time=now, tenant_id=all_tenant.id
                ))
                session.commit()  # persist the three samples

            last_polled = time.time()
            print ("Updating polling interval")
        time.sleep(1)
    exit()


# Parse arguments and then take action.
args = argparser.parse_args()
if args.subcommand == 'report':
    report(args)

elif args.subcommand == 'agent':
    agent(args)
Example #6
import numpy as np
import pandas as pd
import librosa
import os
import pywt
import cv2 as cvlib
from args import parser
import matplotlib.pyplot as plt

args = parser.parse_args()

# Normalize each colour image into [out_min, out_max] (e.g. -1 to 1):
# clip at a multiple of the per-image |value| percentile, then rescale.
def normalize_data_all_gather(vect_in, out_min, out_max, percent_acceptation=80,
                              not_clip_until_acceptation_time_factor=1.5):
    # nb_dim = len(vect_in.shape)
    percent_val = np.percentile(abs(vect_in).reshape((vect_in.shape[0], vect_in.shape[1] * vect_in.shape[2])),
                                percent_acceptation, axis=1)
    percent_val_matrix = not_clip_until_acceptation_time_factor * np.repeat(percent_val,
                                                                            vect_in.shape[1] * vect_in.shape[2],
                                                                            axis=0).reshape(
        (vect_in.shape[0], vect_in.shape[1], vect_in.shape[2]))
    matrix_clip = np.maximum(np.minimum(vect_in, percent_val_matrix), -percent_val_matrix)
    return np.divide(matrix_clip, percent_val_matrix) * ((out_max - out_min) / 2) + (out_max + out_min) / 2
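A quick usage sketch under assumed shapes: the function takes a batch shaped (N, H, W) and rescales each image into [out_min, out_max] after clipping at not_clip_until_acceptation_time_factor times its 80th-percentile magnitude. With fabricated data:

# Usage sketch with fabricated data: 4 images of 64x64, mapped into [-1, 1].
demo_batch = np.random.randn(4, 64, 64)
demo_out = normalize_data_all_gather(demo_batch, -1.0, 1.0)
print(demo_out.shape, float(demo_out.min()), float(demo_out.max()))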


# Read data from file 'filename.csv'
path_liste_file = args.audio_listefile_folder_path
# Control delimiters, rows, column names with read_csv (see later)
#data = pd.read_csv("/Users/vincentbelz/Documents/Data/audio_classification/liste_file/esc50.csv")
data = pd.read_csv(path_liste_file)
# Preview the first 5 lines of the loaded data
print(data.head())
Example #7
File: mtag.py  Project: ToxicFrog/mo
    for key, value in zip(fields, values.groups()):
        if key:
            setTag(id3, key, value)


def main(options):
    music = findMusic(options.paths)

    for i, tags in enumerate(music):
        try:
            for tag in options.tags:
                if tag[0].startswith("auto"):
                    autoTag(tags, tag[1], tags.file)
                else:
                    setTag(tags, tag[0], tag[1])
            sys.stdout.write("\r\x1B[K[%d/%d] %s    " %
                             (i, len(music), tags.file))
            sys.stdout.flush()
            tags.save()
        except Exception as e:
            print("ERROR: %s: %s" % (str(e), tags.file))
            print(sys.exc_info()[0])

    sys.stdout.write("\r\x1B[K[%d/%d] DONE    \n" % (len(music), len(music)))


subparser.set_defaults(func=main, command='tag')
if __name__ == "__main__":
    main(parser.parse_args())
Example #8
# Imports presumed from the truncated file header.
import os
import re

import requests
from queue import Queue

try:
    from tempfile import TemporaryDirectory

    temp_dir = TemporaryDirectory('tumblr_crawler_cli')
except ImportError:
    temp_dir = '.tumblr_crawler_cli'
    if not os.path.exists(temp_dir):
        os.mkdir(temp_dir)

from args import parser
from utils import safe_format, clean_fn

# endregion

queue_sites = Queue()  # queue of sites awaiting parsing
queue_down = Queue()  # download task queue
down_stop = False  # download stop signal
cli_args = parser.parse_args()  # command-line arguments

# Download both photos and videos by default
if not cli_args.down_photo and not cli_args.down_video:
    cli_args.down_photo = cli_args.down_video = True
# Create the HTTP request session and set the proxy
session = requests.session()
if cli_args.proxy:
    session.proxies = {'http': cli_args.proxy, 'https': cli_args.proxy}
# Seed the queue of sites awaiting parsing
for _site in cli_args.sites:
    queue_sites.put(_site)

# Regex for extracting photos when a post is not in the standard format
photo_regex = re.compile(r'https://\d+\.media\.tumblr\.com/\w{32}/tumblr_[\w.]+')
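A small sanity check of the fallback regex (the media URL below is fabricated):

# Sanity check with a fabricated media URL.
_sample = 'https://66.media.tumblr.com/' + 'a' * 32 + '/tumblr_abc123_1280.jpg'
assert photo_regex.findall(_sample) == [_sample]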
Example #9
from torchvision import models
from torch.nn import functional as F
import numpy as np
import cv2
from args import parser
from utils import open_and_preprocess, classes

options = parser.parse_args()


def resnet152_cam(feature_conv, weight_softmax, class_idx):
    size_upsample = (256, 256)
    bz, nc, h, w = feature_conv.shape
    output_cam = []
    for idx in class_idx:
        cam = weight_softmax[idx].dot(feature_conv.reshape((nc, h * w)))
        cam = cam.reshape(h, w)
        cam = cam - np.min(cam)
        cam_img = cam / np.max(cam)
        cam_img = np.uint8(255 * cam_img)
        output_cam.append(cv2.resize(cam_img, size_upsample))
    return output_cam


features_blobs = []  # populated by the forward hook below


def hook_feature(module, input, output):
    features_blobs.append(output.data.cpu().numpy())


model = models.resnet152(pretrained=True)
final_layer = 'layer4'
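The example ends before the hook is attached. The canonical class-activation-map wiring, which this script presumably continues with, registers the hook on layer4 and reads the fc weights; options.image and the assumption that utils.open_and_preprocess returns a 1xCxHxW tensor are both guesses:

# Presumed continuation (standard CAM wiring; not shown in the snippet).
model.eval()
model._modules.get(final_layer).register_forward_hook(hook_feature)

# The fc weights double as the per-class weights: params[-2] is fc.weight.
weight_softmax = np.squeeze(list(model.parameters())[-2].data.numpy())

img = open_and_preprocess(options.image)  # options.image is an assumption
logit = model(img)
probs = F.softmax(logit, dim=1).data.squeeze()
_, idx = probs.sort(0, descending=True)
cams = resnet152_cam(features_blobs[0], weight_softmax, [int(idx[0])])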
Example #10
import os

import matplotlib
matplotlib.use("Agg")

import util.vutils as vutils
from tensorboardX import SummaryWriter
from args import parser
from data.data_loader import CustomDatasetDataLoader, InfiniteDataLoader
from models.baseline import Baseline
from models.semantic_reconstruct import SemanticReconstruct
from models.semantic_consistency import SemanticConsistency
from models.semantic_self_consist import SemanticSelfSupConsistency

#########################################################################
# options
opt = parser.parse_args()

# output directories
opt.out_dir = os.path.join(
    opt.out_dir, opt.dataset, opt.name, 'suprate%.3f_droprate%.2f_seed%d' %
    (opt.sup_portion, opt.x_drop, opt.seed))
os.makedirs(opt.out_dir, exist_ok=True)

# data_loaders
val_loader = CustomDatasetDataLoader(opt, istrain=False)
train_loader = CustomDatasetDataLoader(opt, istrain=True, suponly=False)
opt = train_loader.update_opt(opt)

## wrap with infinite loader
#train_loader = InfiniteDataLoader(train_loader)
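The InfiniteDataLoader wrap is left commented out. A minimal sketch of what such a wrapper typically does (assumed; the project's class isn't shown) is to restart the underlying iterator on exhaustion:

# Assumed behavior of an infinite wrapper: restart the iterator when exhausted.
class InfiniteLoaderSketch:
    def __init__(self, loader):
        self.loader = loader
        self._it = iter(loader)

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return next(self._it)
        except StopIteration:
            self._it = iter(self.loader)
            return next(self._it)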