Example #1
0
def cal(type, dep):
    """Fuse a pair of kernels and redirect stdout to a per-case log file.

    Args:
        type: fusion-type identifier; used both to select the fusion
            strategy (passed through to ``kernel_fusion``) and to name the
            output file. NOTE(review): shadows the builtin ``type`` — name
            kept unchanged for caller compatibility.
        dep: 1 to fuse the dependent kernel pair from DependentKernel.json;
            any other value (expected 0) to fuse the independent
            bilateralFilter/depth2vertex pair.
    """
    dataset = 64    # fixed problem size for this experiment
    partition = 10  # fixed partition class for this experiment

    # T1 Fusion: log file name encodes the fusion type and dependency case.
    filename = "FusedKernelT" + str(type)
    if dep == 1:
        filename = filename + "Dependency.txt"
    else:
        filename = filename + "NoDependency.txt"

    change_stdout(filename)

    if dep == 0:
        # Independent case: each kernel has its own JSON spec file.
        # Fix: use `with` so the spec files are closed instead of leaked.
        with open("bilateralFilterKernel.json") as f:
            info1 = json.loads(f.read())
        with open("depth2vertexKernel.json") as f:
            info2 = json.loads(f.read())
        kernel1 = fw.Kernel(info1, dataset=dataset, partition=partition)
        kernel2 = fw.Kernel(info2, dataset=dataset, partition=partition)
        kernel_fused = kernel_fusion(kernel1, kernel2, type, 0, 0)
    else:
        # Dependent case: a single JSON file holds both kernel specs.
        with open("DependentKernel.json") as f:
            info3 = json.loads(f.read())
        kernel3 = fw.Kernel(info3[0], dataset=dataset, partition=partition)
        kernel4 = fw.Kernel(info3[1], dataset=dataset, partition=partition)
        kernel_fused = kernel_fusion(kernel3, kernel4, type, 0, 0)
    # NOTE(review): kernel_fused is computed but not returned or used here —
    # this excerpt appears truncated; confirm against the full source.
Example #2
0
    def initialize(self):
        info = json.loads(open(self.info_file).read())
        kernel = fw.Kernel(info)

        if self.thread_coarsening_cpu > 1:
            kernel.src_cpu = self.thread_coarsen(self.info_file,
                                                 self.thread_coarsening_cpu,
                                                 self.stride)

        if self.thread_coarsening_gpu > 1:
            kernel.src_gpu = self.thread_coarsen(self.info_file,
                                                 self.thread_coarsening_gpu,
                                                 self.stride)

        kernel.src = kernel.src_cpu  # Redundant assignment to avoid errors

        kernel.macros = self.transform.macros

        print kernel.local_args
        for index in xrange(0, len(kernel.local_args)):
            print index
            kernel.local_args[index][
                'size'] = self.transform.local_buffer_sizes[index]

        for buffer_type in ['input', 'output']:
            for index in xrange(len(kernel.buffer_info[buffer_type])):
                kernel.buffer_info[buffer_type][index][
                    'size'] = self.transform.buffer_sizes[buffer_type][index]
                kernel.buffer_info[buffer_type][index][
                    'chunk'] = self.transform.buffer_chunks[buffer_type][index]
        kernel.local_work_size = self.transform.local_work_size
        kernel.global_work_size = self.transform.global_work_size
        kernel.local_chunk = self.transform.local_chunk
        self.kernel = kernel
    # NOTE(review): this span is the middle of a larger driver routine — the
    # enclosing definition and the names `src_name`, `args`, `parse_arg` are
    # defined outside this excerpt.
    s_name = src_name[:-5]  #name of Kernel (drops a 5-char suffix, presumably ".json" — TODO confirm)
    if args.log:
        logging.basicConfig(level=logging.DEBUG)
    # Initialize the OpenCL host side: command queues, contexts, and the
    # requested number of GPU/CPU devices.
    cmd_qs, ctxs, gpus, cpus = fw.host_initialize(int(args.nGPU),
                                                  int(args.nCPU))
    info_file = args.file
    with open(info_file, "r") as f:
        info = json.loads(f.read())
    dataset = int(args.dataset_size)

    st = time.time()
    if args.dump_output_file:
        fw.dump_output = True
    # Explicit partition class on the CLI overrides the one in the JSON spec.
    if args.partition_class != None:
        partition = int(args.partition_class)
        kernel = fw.Kernel(info, dataset=dataset, partition=partition)
    else:
        kernel = fw.Kernel(info, dataset=dataset)
        partition = info['partition']

    # Unique run name: kernel name, partition, dataset size, and a timestamp
    # with the decimal point stripped.
    name = s_name + '_' + str(partition) + '_' + str(dataset) + '_' + str(
        time.time()).replace(".", "")
    if args.log:
        # NOTE(review): basicConfig was already called above when args.log is
        # set; this second call with filename= is a no-op on Python 2 unless
        # handlers were cleared — verify the intended log destination.
        f_path = fw.SOURCE_DIR + 'logs/' + name + '_debug.log'
        logging.basicConfig(filename=f_path, level=logging.DEBUG)
        print "LOG file is saved at %s" % f_path
    logging.debug('Number of GPUs : %d' % (len(gpus)))
    logging.debug(gpus)
    logging.debug('Number of CPUs : %d' % (len(cpus)))
    logging.debug(cpus)
    sched_start_time = datetime.datetime.now()
Example #4
0
        



#####This is the main code

if __name__ == '__main__':
    args = parse_arg(sys.argv[1:])
    # args.file is a task list: one "<kernel-json> <partition> <dataset>"
    # triple per line.
    task_files = args.file
    kernels = []
    # Fix: use `with` so the task-list file is closed instead of leaked.
    with open(task_files, "r") as task_list:
        for task in task_list:
            task_src, partition, dataset = task.strip("\n").split(" ")
            with open(fw.SOURCE_DIR + "info/" + task_src) as spec:
                info = json.loads(spec.read())
            logging.debug( "Appending kernel" + task_src + " " + partition + " " + dataset)
            kernels.append(fw.Kernel(info, partition=int(partition), dataset=int(dataset)))

    # Consider every ordered pair of loaded kernels for fusion.
    for kernel1 in range(len(kernels)):
        for kernel2 in range(len(kernels)):
            #fuse these two kernels and
            # FIX(review): the loop body contained only a comment, which is a
            # SyntaxError in Python; `pass` keeps the file importable until
            # the fusion logic is implemented.
            pass
    # NOTE(review): continuation of the __main__ driver; the scheduling
    # branches below are truncated in this excerpt.
    # Unique run name from the selection strategy and a timestamp with the
    # decimal point stripped.
    name = "scheduling_" + args.select +"_" +str(time.time()).replace(".", "")
    dump_dev =None  # device selector for output dumping; presumably assigned later — TODO confirm
    if args.dump_output_file:
        fw.dump_output = True
    if args.log:
        f_path = fw.SOURCE_DIR + 'logs/' + name + '_debug.log'
        logging.basicConfig(filename=f_path, level=logging.DEBUG)
    # Dispatch on the requested scheduling strategy (body outside this view).
    if args.select == "baseline":