Code example #1
File: alpha_blend.py  Project: caomw/patchtable
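# Imports assumed by this excerpt (they are not shown on this page); the
# original project code is Python 2.
import os
import sys

import skimage
import skimage.io
import threadmap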
def main():
    args = sys.argv[1:]
    if len(args) != 4:
        print >> sys.stderr, __doc__
        sys.exit(1)
    
    (in1, in2, out, alpha) = args
    alpha = float(alpha)
    
    # Enumerate frame indices until the first missing input file; in1, in2 and
    # out are printf-style filename patterns indexed by frame number.
    L = []
    for i in range(1, 1000**2):
        if not os.path.exists(in1%i):
            break
        L.append(i)
    
    def process(i):
        # Read both input frames, blend them linearly by alpha, and save the result.
        print 'Processing frame', i
        I1 = skimage.img_as_float(skimage.io.imread(in1%i))
        I2 = skimage.img_as_float(skimage.io.imread(in2%i))
        I = I1 + (I2-I1) * alpha
        skimage.io.imsave(out%i, I)

    # Blend all frames in parallel (in reverse order), letting threadmap schedule
    # the work dynamically.
    threadmap.map(process, L[::-1], dynamic=True)
Code example #2
File: isearch.py  Project: ymchen7/patchtable
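# Names assumed from the surrounding module (not shown in this excerpt):
# f (the objective function), max_dim, ndims, max_iters and nproc, plus
# import random and import threadmap.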
def main():
    random.seed(0)
    # Start from ndims indices drawn at random from range(max_dim).
    L = [random.randrange(max_dim) for i in range(ndims)]
    fbest = f(L)

    def get_improvement(i):
        # i is just a worker index from threadmap.map and is overwritten below.
        # Swap one randomly chosen entry of L for a value not yet used and return
        # the candidate if it improves the objective f.
        current = list(L)
        not_included = list(set(range(max_dim)) - set(current))
        i = random.randrange(len(current))
        current[i] = random.choice(not_included)
        fprime = f(current)
        if fprime < fbest:
            return (fprime, current)
        return (fbest, L)
    
    for iter in range(max_iters):
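        # Evaluate nproc candidate swaps in parallel for this iteration.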
        improved_list = threadmap.map(get_improvement, range(nproc), n=nproc, dynamic=True)
        for (fprime, current) in improved_list:
            if fprime < fbest:
                fbest = fprime
                L = current
        print iter, ','.join(str(x) for x in L), fbest 
Code example #3
File: autotune.py  Project: damianfral/Halide
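# This is an inner function of the autotuner; names such as p, cache, nproc,
# filter_func_name, best_run_time, run_timeout, autotune_child, get_error_str,
# log_sched and the COMPILE_*/RUN_* status codes are defined in the enclosing
# file and are not shown here.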
    def test_func(scheduleL, constraints, status_callback, timer, save_output=False, ref_output=''):       # FIXME: Handle constraints
        in_image = p.in_image
        def subprocess_args(schedule, schedule_str, compile=True):
            binary_file = os.path.join(p.tune_dir, 'f' + schedule.identity())
            mode_str = 'compile' if compile else 'run'
            
            sh_args = ['python', 'autotune.py', 'autotune_%s_child'%mode_str, filter_func_name, schedule_str, os.path.abspath(in_image), '%d'%p.trials, binary_file, str(int(save_output)), ref_output]
            sh_name = binary_file + '_' + mode_str + '.sh'
            with open(sh_name, 'wt') as sh_f:
                os.chmod(sh_name, 0755)
                sh_f.write(' '.join(sh_args[:4]) + ' "' + repr(sh_args[4])[1:-1] + '" ' + ' '.join(sh_args[5:-1]) + ' ' + '"' + sh_args[-1] + '"' + '\n')
            return (sh_args, binary_file + '.png')
            
        # Compile all schedules in parallel
        compile_count = [0]
        lock = threading.Lock()
        def compile_schedule(i):
            status_callback('Compile %d/%d'%(compile_count[0]+1,len(scheduleL)))
            
            schedule = scheduleL[i]
            schedule_str = str(schedule)
            if schedule_str in cache:
                return cache[schedule_str]
            
            T0 = time.time()
            (argL, output) = subprocess_args(schedule, schedule_str, True)
            res,out = run_timeout(argL, p.compile_timeout, last_line=True)
            Tcompile = time.time()-T0
            
            timer.compile_time = timer_compile + time.time() - Tbegin_compile
            with lock:
                compile_count[0] += 1

            if out is None:
                return {'time': COMPILE_TIMEOUT, 'compile': Tcompile, 'run': 0.0, 'output': output}
            if not out.startswith('Success'):
                return {'time': COMPILE_FAIL, 'compile': Tcompile, 'run': 0.0, 'output': output}
            return {'time': 0.0, 'compile': Tcompile, 'run': 0.0, 'output': output}
        
        timer_compile = timer.compile_time
        Tbegin_compile = time.time()
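        # Shuffle the schedule indices before compiling them in parallel.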
        shuffled_idx = range(len(scheduleL))
        random.shuffle(shuffled_idx)
        compiledL = threadmap.map(compile_schedule, shuffled_idx, n=nproc)

        #Ttotal_compile = time.time()-Tbegin_compile
        
        assert len(compiledL) == len(scheduleL)
        
        # Run schedules in serial
        def run_schedule(i):
            status_callback('Run %d/%d'%(i+1,len(scheduleL)))
            schedule = scheduleL[i]

            schedule_str = str(schedule)
            if schedule_str in cache:
                return cache[schedule_str]

            compiled_ans = compiledL[i]
            if get_error_str(compiled_ans['time']) is not None:
                return compiled_ans
                
            T0 = time.time()
            #res,out = run_timeout(subprocess_args(schedule, schedule_str, False), best_run_time[0]*p.run_timeout_mul*p.trials+p.run_timeout_bias, last_line=True)
            (argL, output) = subprocess_args(schedule, schedule_str, False)
            res,out = autotune_child(argL[2:], best_run_time[0]*p.run_timeout_mul*p.trials+p.run_timeout_bias+(p.run_save_timeout if save_output else 0.0))
            Trun = time.time()-T0
            
            if out is None:
                ans = {'time': RUN_TIMEOUT, 'compile': compiled_ans['compile'], 'run': Trun, 'output': output}
            elif not out.startswith('Success') or len(out.split()) != 2:
                code = RUN_FAIL
                if out.startswith('RUN_CHECK_FAIL'):
                    code = RUN_CHECK_FAIL
                ans = {'time': code, 'compile': compiled_ans['compile'], 'run': Trun, 'output': output}
            else:
                T = float(out.split()[1])
                best_run_time[0] = min(best_run_time[0], T)
                ans = {'time': T, 'compile': compiled_ans['compile'], 'run': Trun, 'output': output}
                        
            timer.run_time = timer_run + time.time() - Tbegin_run
            
            return ans
        
        # Run, cache and display schedule in serial
        def run_full(i):
            ans = run_schedule(i)
            
            schedule = scheduleL[i]
            cache.setdefault(str(schedule), ans)

            e = get_error_str(ans['time'])
            first_part = 'Error %s'%e if e is not None else 'Best time %.4f'%ans['time']
            log_sched(p, schedule, '%s, compile=%.4f, run=%.4f'%(first_part, ans['compile'], ans['run']))
            return ans
            
        Tbegin_run = time.time()
        timer_run = timer.run_time
        runL = map(run_full, range(len(scheduleL)))
        
#        for i in range(len(scheduleL)):
#            schedule = scheduleL[i]
            #runL[i]['compile_avg'] = Ttotal_compile/len(scheduleL)
        
        return runL
Code example #4
File: autotune.py  Project: wishqube/Halide (the same function as in the previous example, reformatted)
    def test_func(scheduleL,
                  constraints,
                  status_callback,
                  timer,
                  save_output=False,
                  ref_output=''):  # FIXME: Handle constraints
        in_image = p.in_image

        def subprocess_args(schedule, schedule_str, compile=True):
            binary_file = os.path.join(p.tune_dir, 'f' + schedule.identity())
            mode_str = 'compile' if compile else 'run'

            sh_args = [
                'python', 'autotune.py',
                'autotune_%s_child' % mode_str, filter_func_name, schedule_str,
                os.path.abspath(in_image),
                '%d' % p.trials, binary_file,
                str(int(save_output)), ref_output
            ]
            sh_name = binary_file + '_' + mode_str + '.sh'
            with open(sh_name, 'wt') as sh_f:
                os.chmod(sh_name, 0755)
                sh_f.write(' '.join(sh_args[:4]) + ' "' +
                           repr(sh_args[4])[1:-1] + '" ' +
                           ' '.join(sh_args[5:-1]) + ' ' + '"' + sh_args[-1] +
                           '"' + '\n')
            return (sh_args, binary_file + '.png')

        # Compile all schedules in parallel
        compile_count = [0]
        lock = threading.Lock()

        def compile_schedule(i):
            status_callback('Compile %d/%d' %
                            (compile_count[0] + 1, len(scheduleL)))

            schedule = scheduleL[i]
            schedule_str = str(schedule)
            if schedule_str in cache:
                return cache[schedule_str]

            T0 = time.time()
            (argL, output) = subprocess_args(schedule, schedule_str, True)
            res, out = run_timeout(argL, p.compile_timeout, last_line=True)
            Tcompile = time.time() - T0

            timer.compile_time = timer_compile + time.time() - Tbegin_compile
            with lock:
                compile_count[0] += 1

            if out is None:
                return {
                    'time': COMPILE_TIMEOUT,
                    'compile': Tcompile,
                    'run': 0.0,
                    'output': output
                }
            if not out.startswith('Success'):
                return {
                    'time': COMPILE_FAIL,
                    'compile': Tcompile,
                    'run': 0.0,
                    'output': output
                }
            return {
                'time': 0.0,
                'compile': Tcompile,
                'run': 0.0,
                'output': output
            }

        timer_compile = timer.compile_time
        Tbegin_compile = time.time()
        shuffled_idx = range(len(scheduleL))
        random.shuffle(shuffled_idx)
        compiledL = threadmap.map(compile_schedule, shuffled_idx, n=nproc)

        #Ttotal_compile = time.time()-Tbegin_compile

        assert len(compiledL) == len(scheduleL)

        # Run schedules in serial
        def run_schedule(i):
            status_callback('Run %d/%d' % (i + 1, len(scheduleL)))
            schedule = scheduleL[i]

            schedule_str = str(schedule)
            if schedule_str in cache:
                return cache[schedule_str]

            compiled_ans = compiledL[i]
            if get_error_str(compiled_ans['time']) is not None:
                return compiled_ans

            T0 = time.time()
            #res,out = run_timeout(subprocess_args(schedule, schedule_str, False), best_run_time[0]*p.run_timeout_mul*p.trials+p.run_timeout_bias, last_line=True)
            (argL, output) = subprocess_args(schedule, schedule_str, False)
            res, out = autotune_child(
                argL[2:], best_run_time[0] * p.run_timeout_mul * p.trials +
                p.run_timeout_bias +
                (p.run_save_timeout if save_output else 0.0))
            Trun = time.time() - T0

            if out is None:
                ans = {
                    'time': RUN_TIMEOUT,
                    'compile': compiled_ans['compile'],
                    'run': Trun,
                    'output': output
                }
            elif not out.startswith('Success') or len(out.split()) != 2:
                code = RUN_FAIL
                if out.startswith('RUN_CHECK_FAIL'):
                    code = RUN_CHECK_FAIL
                ans = {
                    'time': code,
                    'compile': compiled_ans['compile'],
                    'run': Trun,
                    'output': output
                }
            else:
                T = float(out.split()[1])
                best_run_time[0] = min(best_run_time[0], T)
                ans = {
                    'time': T,
                    'compile': compiled_ans['compile'],
                    'run': Trun,
                    'output': output
                }

            timer.run_time = timer_run + time.time() - Tbegin_run

            return ans

        # Run, cache and display schedule in serial
        def run_full(i):
            ans = run_schedule(i)

            schedule = scheduleL[i]
            cache.setdefault(str(schedule), ans)

            e = get_error_str(ans['time'])
            first_part = 'Error %s' % e if e is not None else 'Best time %.4f' % ans['time']
            log_sched(
                p, schedule, '%s, compile=%.4f, run=%.4f' %
                (first_part, ans['compile'], ans['run']))
            return ans

        Tbegin_run = time.time()
        timer_run = timer.run_time
        runL = map(run_full, range(len(scheduleL)))

        #        for i in range(len(scheduleL)):
        #            schedule = scheduleL[i]
        #runL[i]['compile_avg'] = Ttotal_compile/len(scheduleL)

        return runL
Code example #5
File: forkmap.py  Project: dlobue/nara
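# Squares the first 10**6 integers with a thread-parallel map.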
def h2():
    return threadmap.map(lambda x: x ** 2, range(10 ** 6))
Code example #6
File: forkmap.py  Project: dlobue/nara
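# Computes pow(x, 10**1000, 10**9) for each x in range(10**3) with a thread-parallel map.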
def h1():
    return threadmap.map(lambda x: pow(x, 10 ** 1000, 10 ** 9), range(10 ** 3))
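Across these examples, threadmap.map is called like the built-in map(func, sequence), with optional keyword arguments n (used here to set the number of parallel workers) and dynamic (used here to request dynamic scheduling of the work items).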