Example #1
import logging

import M2Crypto
import time
import hashlib
import os
import utils
import tempfile
import shutil
import contrib
import flags

FLAGS = flags.FLAGS

flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA')
flags.DEFINE_string('keys_path', utils.abspath('../keys'),
                    'Where we keep our keys')
flags.DEFINE_string('ca_path', utils.abspath('../CA'),
                    'Where we keep our root CA')


def ca_path(username):
    if username:
        return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, username)
    return "%s/cacert.pem" % (FLAGS.ca_path)


def fetch_ca(username=None, chain=True):
    buffer = ""
    with open(ca_path(username), "r") as cafile:
        buffer += cafile.read()
    return buffer
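
For context on how modules like these are driven, here is a minimal sketch, assuming a gflags/absl-compatible `flags` module like the one imported above (the `greeting` flag and `main` are illustrative, not from the source):

import sys

import flags

FLAGS = flags.FLAGS

flags.DEFINE_string('greeting', 'hello', 'Greeting to print')


def main():
    # Once argv is parsed, flag values are plain attributes on FLAGS.
    print('%s, world' % FLAGS.greeting)


if __name__ == '__main__':
    FLAGS(sys.argv)  # gflags-style: calling FLAGS parses the command line
    main()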
Example #2
import config  # project-local: provides INPUT_DATA_DIRS
import data    # project-local: provides the SEQUENCE_* feature constants
import flags

_VALID_INPUT_FEATURES = frozenset({
    data.SEQUENCE_ONE_HOT,
    data.SEQUENCE_KMER_COUNT,
})

TUNER_LOSS_LOSS = 'loss'
TUNER_LOSS_AUC = 'auc/true_top_1p'
TUNER_GOAL_MAX = 'MAXIMIZE'
TUNER_GOAL_MIN = 'MINIMIZE'
TUNER_LOSS_TO_GOAL = {
    TUNER_LOSS_LOSS: TUNER_GOAL_MIN,
    TUNER_LOSS_AUC: TUNER_GOAL_MAX,
}

flags.DEFINE_integer('task', 0, 'Task id when running online')
flags.DEFINE_string('master', '', 'TensorFlow master to use')
flags.DEFINE_string('input_dir', None, 'Path to input data.')
flags.DEFINE_string(
    'affinity_target_map', '',
    'Name of the affinity map from count values to affinity values. '
    'Needed only if using input_dir and running inference or using '
    'microarray values.')
flags.DEFINE_enum(
    'dataset', None,
    sorted(config.INPUT_DATA_DIRS),
    'Name of dataset with known input_dir on which to train. Either input_dir '
    'or dataset is required.')
flags.DEFINE_integer('val_fold', 0, 'Fold to use for validation.')
flags.DEFINE_string('save_base', None,
                    'Base path to save any output or weights.')
flags.DEFINE_string('run_name', None, 'Name of folder created in save_base.')
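
The 'Either input_dir or dataset is required' contract above would normally be enforced right after the definitions; a minimal sketch using the absl.flags validator API (whether this project's `flags` wrapper exposes these calls is an assumption):

# Reject runs that set neither or both of the two input sources
# (absl.flags API; assumed to be available on this wrapper).
flags.mark_flags_as_mutual_exclusive(['input_dir', 'dataset'], required=True)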
Example #3
from lasagne.objectives import categorical_accuracy, categorical_crossentropy
from lasagne.updates import adam
from lasagne.utils import floatX
from theano import tensor as T

import flags
from at import adversarial_training
from data import batch_iterator, mnist_load, select_balanced_subset
from deepfool import deepfool
from models import create_network, with_end_points
from utils import (build_result_str, save_images, save_network,
                   setup_train_experiment)

# experiment parameters
flags.DEFINE_integer("seed", 1, "experiment seed")
flags.DEFINE_string("name", None, "name of the experiment")
flags.DEFINE_string("data_dir", "data", "path to data")
flags.DEFINE_string("train_dir", "runs", "path to working dir")

# gan model parameters
flags.DEFINE_string("model", "mlp", "model name (mlp or mlp_with_bn)")
flags.DEFINE_string("layer_dims", "1000-1000-1000-10",
                    "dimensions of fully-connected layers")
flags.DEFINE_bool("use_dropout", False, "whenever to use dropout or not")
flags.DEFINE_float("lmbd", 1.0, "regularization coefficient")
flags.DEFINE_float("epsilon", 0.2, "epsilon for generative fgsm perturbation")

# adversary parameters
flags.DEFINE_integer("deepfool_iter", 25,
                     "maximum number of deepfool iterations")
flags.DEFINE_float("deepfool_clip", 0.5, "perturbation clip during search")
Example #4
from daemon import pidlockfile

import flags

FLAGS = flags.FLAGS

flags.DEFINE_bool('daemonize', False, 'daemonize this process')

# NOTE(termie): right now I am defaulting to using syslog when we daemonize
#               it may be better to do something else -shrug-

# (Devin) I think we should let each process have its own log file
#         and put it in /var/logs/nova/(appname).log
#         This makes debugging much easier and cuts down on sys log clutter.
flags.DEFINE_bool('use_syslog', True, 'output to syslog when daemonizing')
flags.DEFINE_string('logfile', None, 'log file to output to')
flags.DEFINE_string('pidfile', None, 'pid file to output to')
flags.DEFINE_string('working_directory', './', 'working directory...')


def stop(pidfile):
    """
    Stop the daemon
    """
    # Get the pid from the pidfile
    try:
        with open(pidfile, 'r') as pf:
            pid = int(pf.read().strip())
    except IOError:
        pid = None
Example #5
from context import firestore
from context import location
from context import logger
from context import network_utils
import datetime
import flags
import json
import os
import time

FLAGS = flags.FLAGS

flags.DEFINE_string('location_name', None, 'Set this location name')
flags.DEFINE_string('base_path', None, 'Set the firebase base path.')
flags.DEFINE_string('service_account_json', None,
                    'Set the file path to the service account key json file.')

timestamp = datetime.datetime.fromtimestamp(
    time.time()).strftime('%Y-%m-%d %H:%M:%S')
database_url = (
    'https://%s.firebaseio.com' %
    json.loads(open(FLAGS.service_account_json).read())['project_id'])
fs = firestore.FireStore(FLAGS.base_path, database_url,
                         FLAGS.service_account_json)
lan_interfaces = network_utils.get_lan_interfaces()
update_location = location.Location(
    name=FLAGS.location_name,
    external_ip_address=network_utils.get_external_ip_address(),
    local_ip_address=lan_interfaces,
    metadata={
        'project': 'address-locator-python',
Example #6
import subprocess
import random
import time

from nova.utils import runthis, generate_uid
from nova import rpc, datastore, exception

from nova import contrib
import flags
import socket

from tornado import ioloop
from twisted.internet import defer

FLAGS = flags.FLAGS
flags.DEFINE_string('storage_dev', '/dev/sdb',
                    'Physical device to use for volumes')
flags.DEFINE_string('volume_group', 'nova-volumes',
                    'Name for the VG that will contain exported volumes')
flags.DEFINE_string('aoe_eth_dev', 'eth0',
                    'Which device to export the volumes on')

flags.DEFINE_string('storage_name', socket.gethostname(), 'name of this node')
flags.DEFINE_string('storage_availability_zone', 'nova',
                    'availability zone of this node')
KEEPER = datastore.keeper(prefix="storage")


class BlockStore(object):
    def __init__(self):
        super(BlockStore, self).__init__()
        self.volume_class = Volume
Example #7
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark script for TensorFlow.

See the README for more information.
"""

from __future__ import print_function

#from absl import app
from absl import flags as absl_flags
import tensorflow as tf
import flags

flags.DEFINE_string('network_dir', None, 'network file path.')
flags.DEFINE_string('network', 'network.py', 'network file name')
flags.DEFINE_string('data_dir', None, 'dataset location')
flags.DEFINE_integer('small_chunk', 1, 'accumulate gradients.')
flags.DEFINE_string('memory_saving_method', None,
                    'set up the memory saving method: 1. recomputing 2. TBD')
flags.DEFINE_enum('lr_policy', 'multistep', ('multistep', 'exp'),
                  'learning_rate policy')
flags.DEFINE_boolean('aug_flip', True,
                     'whether to randomly flip the dataset left or right')
flags.DEFINE_integer(
    'stop_accu_epoch', 0, 'early stop when accuracy does not increase 1% for '
    'this number of epochs')
flags.DEFINE_boolean('save_stop', True,
                     'whether to save checkpoint when killing process')
flags.DEFINE_list(
Example #8
# pylint: enable=line-too-long

# Google internal
import apache_beam as beam
import runner
import app
import flags

from ..learning import eval_feedforward
from ..utils import pool

FLAGS = flags.FLAGS
flags.DEFINE_integer('num_batches', 1000, 'Number of batches to run')
flags.DEFINE_integer('batch_size', 10000, 'Number of sequences per batch')
flags.DEFINE_integer('num_to_save', 2000, 'The number of top results to save.')
flags.DEFINE_string('target_name', None,
                    'The name of the target protein for the inference.')
flags.DEFINE_string('model_dir', None,
                    'The path to base trained model directory.')
flags.DEFINE_string('checkpoint_path', None,
                    'String path to the checkpoint of the model.')
flags.DEFINE_string('output_name', None, 'The name of the output file.')
flags.DEFINE_integer('sequence_length', 40, 'The length of sequences to test.')
flags.DEFINE_string('affinity_target_map', None, 'Name of affinity target map')

_METRICS_NAMESPACE = 'SearchInference'


class RunInferenceForBatch(beam.PTransform):
    """Generates random batches and runs inference."""
    def __init__(self, sequence_length, target_name, model_dir,
                 checkpoint_path, affinity_target_map):
Example #9
import flags
import gfile
import sstable


from ..util import measurement_pb2
from ..preprocess import utils


class Error(Exception):
  pass


FLAGS = flags.FLAGS

flags.DEFINE_string("fastq1", None, "Path to the first fastq file.")
flags.DEFINE_string("fastq2", None,
                    "Path to the second fastq file for paired-end "
                    "sequencing, or None for single end")
flags.DEFINE_integer("measurement_id", None,
                     "The measurement data set ID for this fastq pair, from "
                     "the experiment proto")
flags.DEFINE_integer("sequence_length", 40,
                     "Expected length of each sequence read")
flags.DEFINE_string("output_name",
                    "xxx"
                    "aptitude", "Path and name for the output sstable")
flags.DEFINE_integer("base_qual_threshold", 20, "integer indicating the "
                     "lowest quality (on scale from 0 to 40) for a single "
                     "base to be considered acceptable")
flags.DEFINE_integer("bad_base_threshold", 5, "integer indicating the maximum "
Example #10
import logging
from collections import OrderedDict

import numpy as np
import theano
from theano import tensor as T
from lasagne.objectives import categorical_accuracy

import flags
from at import fast_gradient_perturbation
from data import batch_iterator, mnist_load, select_balanced_subset
from deepfool import deepfool
from models import create_network, with_end_points
from utils import (load_network, load_training_params, build_result_str,
                   save_images)

flags.DEFINE_string("load_dir", None, "path to load checkpoint from")
flags.DEFINE_integer("load_epoch", None, "epoch for which restore model")
flags.DEFINE_string("working_dir", "test", "path to working dir")
flags.DEFINE_bool("sort_labels", True, "sort labels")
flags.DEFINE_integer("batch_size", 100, "batch_index size (default: 100)")
flags.DEFINE_float("fgsm_epsilon", 0.2, "fast gradient epsilon (default: 0.2)")
flags.DEFINE_integer("deepfool_iter", 50,
                     "maximum number of deepfool iterations (default: 25)")
flags.DEFINE_float("deepfool_clip", 0.5,
                   "perturbation clip during search (default: 0.1)")
flags.DEFINE_float("deepfool_overshoot", 0.02,
                   "multiplier for final perturbation")
flags.DEFINE_integer("summary_frequency", 10, "summarize frequency")

FLAGS = flags.FLAGS
logger = logging.getLogger()
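
A usage note on the `DEFINE_bool` flags above: gflags-style modules accept a negated form on the command line, so, assuming that convention holds for this wrapper (`eval.py` is a placeholder name):

# python eval.py --sort_labels        -> FLAGS.sort_labels == True
# python eval.py --nosort_labels      -> FLAGS.sort_labels == False
# python eval.py --sort_labels=false  is also accepted by gflags/absl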
Example #11
import os
import contrib
import anyjson

import flags
import utils

FLAGS = flags.FLAGS

flags.DEFINE_string('datastore_path', utils.abspath('../keeper'),
                    'where keys are stored on disk')


class keeper(object):
    def __init__(self, prefix="nova-"):
        self.prefix = prefix
        try:
            os.mkdir(FLAGS.datastore_path)
        except OSError:
            # the datastore directory already exists
            pass

    def _slugify(self, key):
        return key

    def __delitem__(self, item):
        item = self._slugify(item)
        path = "%s/%s%s" % (FLAGS.datastore_path, self.prefix, item)
        if os.path.isfile(path):
            os.remove(path)

    def __getitem__(self, item):
Example #12
from __future__ import print_function

from Predictor import Predictor
import flags

flags.DEFINE_string('dataset', 'cora', '[cora, citeseer]')
flags.DEFINE_string('subgraph', 'subgraph/',
                    'Directory of all subgraphs, each file is a subgraph')
flags.DEFINE_string('graph', 'graph.txt', 'Edge list of the complete graph')
flags.DEFINE_string('kernel', 'kernel.json', 'Kernels to be matched')
flags.DEFINE_string('query', 'query',
                    'Used to create query files used by SubMatch')
flags.DEFINE_string('meta', 'meta/',
                    'Directory of matched instances of kernels')
flags.DEFINE_string('data', 'data.txt', None)
flags.DEFINE_string('feature', 'feature.txt', None)
flags.DEFINE_string('label', 'label.txt', None)

flags.DEFINE_boolean('use_feature', True, 'Use feature or not')
flags.DEFINE_boolean('use_embedding', True, 'Use embedding or not')
flags.DEFINE_integer('feat_dim', -1, None)
flags.DEFINE_list(
    'node_dim', [256],
    'Dimension of hidden layers between feature and node embedding')
flags.DEFINE_list(
    'instance_h_dim', [256],
    'Dimension of hidden layers between node embedding and instance embedding, last element is the dimension of instance embedding'
)
flags.DEFINE_list(
    'graph_h_dim', [128],
    'Dimension of hidden layers between instance embedding and subgraph embedding, last element is the dimension of subgraph embedding'
)
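
On `DEFINE_list` as used above: in gflags/absl, a list flag is passed as a single comma-separated string (e.g. --node_dim=512,256) and surfaces as a Python list of strings, so downstream code typically casts the elements. A sketch (the int cast is an assumption, motivated by the integer defaults above):

FLAGS = flags.FLAGS


def node_dims():
    # --node_dim=512,256 arrives as ['512', '256']; the default stays [256].
    return [int(d) for d in FLAGS.node_dim]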