def main(i):

    # Load common table file (for all models)
    ddd = {}
    r = ck.load_json_file({'json_file': 'save_all_model_data_tmp.json'})
    if r['return'] == 0:
        ddd = r['dict']

    # Iterate over compilers
    for gi in range(0, len(gcc)):
        g = gcc[gi]
        depth = best_depth[gi]

        ck.out(
            '********************************************************************************'
        )
        ck.out('Modeling optimizations for ' + g)
        ck.out('')

        if g not in ddd: ddd[g] = {}

        gx = g.replace(' ', '_')
        gx2 = g.replace(' ', '-')

        r = ck.load_json_file(
            {'json_file': 'prepare_train_data_tmp.' + gx + '.json'})
        if r['return'] > 0: ck.err(r)

        d = r['dict']

        ftable = d['ftable']
        ctable = d['ctable']

        s = '==============================================================\n'
        s += 'Depth: ' + str(depth) + '\n\n'

        ck.out(s)

        # Building decision tree on all data
        ck_model_entry_name = "rpi3-milepost-model-" + gx2.lower()

        ii = {
            'action': 'build',
            'module_uoa': 'model',
            'ftable': ftable,
            'ctable': ctable,
            'keep_temp_files': 'yes',
            "model_module_uoa": "model.sklearn",
            "model_name": "dtc",
            "model_file": "tmp-model-sklearn-dtc",
            "model_params": {
                "max_depth": depth
            },
            "model_repo_uoa": "ck-rpi-optimization-results",
            "model_data_uoa": ck_model_entry_name,
            "out": ""
        }

        # Training
        cii = copy.deepcopy(ii)

        r = ck.access(ii)
        if r['return'] > 0: ck.err(r)

        # Validating
        ii = copy.deepcopy(ii)

        ii['action'] = 'validate'

        r = ck.access(ii)
        if r['return'] > 0: ck.err(r)

        obs = r['observations']
        wrong = r['mispredictions']

        acc = float(obs - wrong) / float(obs)

        x = '  Accuracy on all data: ' + str(acc)
        s += x
        ck.out(x)

        # Record example of features to demo predictions (to be integrated with compiler optimization prediction (web)services)
        d = {
            "action": "use",
            "module_uoa": "model",
            "features": ftable[123],  # features of some random benchmark
            "model_module_uoa": "model.sklearn",
            "model_name": "dtc",
            "model_file": "tmp-model-sklearn-dtc",
            "model_data_uoa": ck_model_entry_name
        }

        r = ck.save_json_to_file({
            'json_file': 'process_model_using_decision_trees_and_record_to_ck_use.' + gx + '.json',
            'dict': d
        })
        if r['return'] > 0: return r

    return {'return': 0}
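A minimal sketch (not part of the original script) of how the recorded "use" input above could be replayed through the CK API to obtain a prediction for the stored feature vector; it assumes the CK kernel, the model.sklearn module and the trained model entry are available:

import ck.kernel as ck

# Load the recorded "use" input (here for GCC 4.9.2; any generated file works)
r = ck.load_json_file({'json_file': 'process_model_using_decision_trees_and_record_to_ck_use.GCC_4.9.2.json'})
if r['return'] > 0: ck.err(r)

# Replay it through the CK model module to get a predicted optimization class
r = ck.access(r['dict'])
if r['return'] > 0: ck.err(r)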
Example #2
def do(i):

    # List performance entries
    r=ck.access({'action':'search',
                 'module_uoa':'experiment',
                 'data_uoa':'ck-request-asplos18-mxnet-arm-performance*'
#                 'repo_uoa':'ck-request-asplos18-results'
                })
    if r['return']>0: return r
    lst=r['lst']

    for q in lst:
        duid=q['data_uid']
        duoa=q['data_uoa']
        ruid=q['repo_uid']
        path=q['path']

        ck.out(duoa)

        # Search matching accuracy entry
        r=ck.access({'action':'load',
                     'module_uoa':'experiment',
                     'data_uoa':duid,
                     'repo_uoa':ruid})
        if r['return']>0: return r

        dd=r['dict']
        ruid=r['repo_uid']
        apath=r['path']             

        # Updating meta if needed
        dd['meta']['scenario_module_uoa']='a555738be4b65860' # module:request.asplos18

        dd['meta']['dataset_species']='ImageNet' # dataset species (free format)

        dd['meta']['platform_species']='embedded' # embedded vs server (maybe other classifications such as edge)

        dd['meta']['platform_peak_power']=6.05 #Watts http://opensource.rock-chips.com/images/6/60/Rockchip_RK3399_Datasheet_V1.6-20170301.pdf last page
        dd['meta']['platform_price']=149 # $, http://shop.t-firefly.com/goods.php?id=45
        dd['meta']['platform_price_date']='20180416' # date

        dd['meta']['artifact']='08da9685582866a0' # artifact description

        dd['meta']['model_precision']='fp32'

        dd['meta']['processed']='yes'

        # Unified full name for some deps
        ds=dd['meta']['deps_summary']

        x=ds['mxnet-model']
        r=ck.access({'action':'make_deps_full_name','module_uoa':'request.asplos18','deps':x})
        if r['return']>0: return r
        full_model_name=r['full_name']

        dd['meta']['model_design_name']=full_model_name

        # for simplicity add manually (can later automate it as in other artifacts, but just didn't have time here)
        if 'mobilenet' in full_model_name:
           dd['meta']['model_species']='07d4e7aa3750ddc6' # model.species:mobilenet
           dd['meta']['dataset_size']=50000 # number of images ...
           accuracy_top1=0.66694
           accuracy_top5=0.87734
        elif 'resnet' in full_model_name:
           dd['meta']['model_species']='d41bbf1e489ab5e0' # model.species:resnet18
           dd['meta']['dataset_size']=25000 # number of images ...
           accuracy_top1=0.61318
           accuracy_top5=0.83702
        elif 'vgg16' in full_model_name:
           dd['meta']['model_species']='a3fcac86d42bdbc4' # model.species:vgg16
           dd['meta']['dataset_size']=5000 # number of images ...
           accuracy_top1=0.63120
           accuracy_top5=0.84951
        else:
           return {'return':1, 'error':'unknown model ('+full_model_name+')'}

        x=ds['lib-mxnet']
        r=ck.access({'action':'make_deps_full_name','module_uoa':'request.asplos18','deps':x})
        if r['return']>0: return r
        dd['meta']['library_name']=r['full_name']

        x=x['deps']['compiler']
        r=ck.access({'action':'make_deps_full_name','module_uoa':'request.asplos18','deps':x})
        if r['return']>0: return r
        dd['meta']['compiler_name']=r['full_name']

        # Updating entry
        r=ck.access({'action':'update',
                     'module_uoa':'experiment',
                     'data_uoa':duid,
                     'repo_uoa':ruid,
                     'dict':dd,
                     'substitute':'yes',
                     'ignore_update':'yes',
                     'sort_keys':'yes'
                    })
        if r['return']>0: return r

        # Checking points to aggregate
        os.chdir(path)
        dperf=os.listdir(path)
        for f in dperf:
            if f.endswith('.cache.json'):
               os.system('git rm -f '+f)

            elif f.endswith('.flat.json'):
               ck.out(' * '+f)

               # Load performance file 
               p1=os.path.join(path, f)

               r=ck.load_json_file({'json_file':p1})
               if r['return']>0: return r
               d1=r['dict']

               # Prune some old value
               d={}
               for k in d1:
                   if not k.startswith('##characteristics#run#accuracy_top1') and \
                      not k.startswith('##characteristics#run#accuracy_top5') and \
                      not k.startswith('##characteristics#run#inference_throughput') and \
                      not k.startswith('##characteristics#run#inference_latency'):
                      d[k]=d1[k]

               # for simplicity add manually (can later automate it as in other artifacts, but just didn't have time here)
               if 'mobilenet' in full_model_name:
                  model_size=17024109
               elif 'resnet' in full_model_name:
                  model_size=46803089
               elif 'vgg16' in full_model_name:
                  model_size=553432060
               else:
                  return {'return':1, 'error':'unknown model ('+full_model_name+')'}

               d['##features#model_size#min']=model_size # Bytes

               d['##features#gpu_freq#min']=''
               d['##features#cpu_freq#min']=1416
               d['##features#freq#min']=d['##features#cpu_freq#min']

               d['##features#processed#min']='yes'

               # Add throughput (images/second)
               tall=d.get('##characteristics#run#execution_time_classify#all',[]) # internal classification-time measurements
               if len(tall)>0:
                  tnew=[]
                  for t in tall:
                      t1=1/t
                      tnew.append(t1)
                  
                  r=ck.access({'action':'stat_analysis',
                               'module_uoa':'experiment',
                               'dict':d,
                               'dict1':{'##characteristics#run#inference_throughput':tnew}
                              })
                  if r['return']>0: return r

               # Unify batch size
               batch=1 # for now only 1 is supported in this artifact
               d['##features#batch_size#min']=batch

               # inference latency
               d['##features#measuring_latency#min']='yes'

               r=ck.access({'action':'stat_analysis',
                            'module_uoa':'experiment',
                            'dict':d,
                            'dict1':{'##characteristics#run#inference_latency':tall}
                           })
               if r['return']>0: return r

               r=ck.access({'action':'stat_analysis',
                            'module_uoa':'experiment',
                            'dict':d,
                            'dict1':{'##characteristics#run#prediction_time_avg_s':tall}
                           })
               if r['return']>0: return r

               # Add accuracy (was calculated through separate experiment)
               r=ck.access({'action':'stat_analysis',
                            'module_uoa':'experiment',
                            'dict':d,
                            'dict1':{'##characteristics#run#accuracy_top1':[accuracy_top1]}
                           })
               if r['return']>0: return r

               # Add accuracy (was calculated through separate experiment)
               r=ck.access({'action':'stat_analysis',
                            'module_uoa':'experiment',
                            'dict':d,
                            'dict1':{'##characteristics#run#accuracy_top5':[accuracy_top5]}
                           })
               if r['return']>0: return r

               # Save updated dict
               r=ck.save_json_to_file({'json_file':p1, 'dict':d, 'sort_keys':'yes'})
               if r['return']>0: return r

    return {'return':0}
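For reference, the throughput values recorded above are just reciprocals of the per-image latencies (batch size 1). A standalone illustration in plain Python:

# Hypothetical latency samples in seconds per image (batch size 1)
latencies = [0.052, 0.049, 0.050]

# Throughput in images per second, as computed in the loop above
throughput = [1.0 / t for t in latencies]
print(throughput)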
Example #3
def main(i):

    # Load common table file (for all models)
    ddd = {}
    r = ck.load_json_file({'json_file': 'save_all_model_data_tmp.json'})
    if r['return'] == 0:
        ddd = r['dict']

    # Iterate over compilers
    for g in gcc:
        ck.out(
            '********************************************************************************'
        )
        ck.out('Preparing accuracy graph for ' + g)
        ck.out('')

        gx = g.replace(' ', '_')

        d = ddd[g]

        table = {"0": [], "1": []}

        for depth in range(1, 30):
            key1 = 'decision_trees_with_cross_validation_depth_' + str(depth) + '_ft1_ft65'
            key2 = 'decision_trees_without_cross_validation_depth_' + str(depth) + '_ft1_ft65'

            acc1 = d[key1]
            acc2 = d[key2]

            table["0"].append([depth, acc1])
            table["1"].append([depth, acc2])

        # Graph input
        ii = {
            "action": "plot",
            "module_uoa": "graph",
            "table": table,
            "add_x_loop": "no",
            "ignore_point_if_none": "yes",
            "plot_type": "mpl_2d_scatter",
            "display_y_error_bar": "no",
            "title": "Powered by Collective Knowledge",
            "axis_x_desc": "Decision tree depth",
            "axis_y_desc": "Model accuracy for " + g + " (%)",
            "plot_grid": "yes",
            "mpl_image_size_x": "12",
            "mpl_image_size_y": "6",
            "mpl_image_dpi": "100",
            "font_size": 22,
            "out_to_file": 'process_model_using_decision_trees_accuracy_graph_output.' + gx + '.pdf',
            "point_style": {
                "0": {"marker": "o"},
                "1": {"marker": "x"}
            }
        }

        # Save common data (continuously, not to lose data)
        r = ck.save_json_to_file({
            'json_file': 'process_model_using_decision_trees_accuracy_graph_input.' + gx + '.json',
            'dict': ii
        })
        if r['return'] > 0: return r

        # Plot graph (save to pdf)
        r = ck.access(ii)
        if r['return'] > 0: return r

    return {'return': 0}
def run(i):
    # Get path1
    r=ck.access({'action':'load',
                 'module_uoa':'experiment',
                 'data_uoa':euoa1})
    if r['return']>0: return r
    p1=r['path']

    # Get path2
    r=ck.access({'action':'load',
                 'module_uoa':'experiment',
                 'data_uoa':euoa2})
    if r['return']>0: return r
    p2=r['path']

    # Going through points in path 2
    dirList1=os.listdir(p1)
    dirList2=os.listdir(p2)

    for fn2 in dirList2:
        if fn2.endswith('.features.json'):
           ck.out('Loading point '+fn2+' ...')

           px2=os.path.join(p2, fn2)

           r=ck.load_json_file({'json_file':px2})
           if r['return']>0: return r

           df2=r['dict'].get('features',{})

           # Searching in p1
           found=False
           for fn1 in dirList1:
               if fn1.endswith('.features.json'):
                  px1=os.path.join(p1, fn1)

                  r=ck.load_json_file({'json_file':px1})
                  if r['return']>0: return r

                  df1=r['dict'].get('features',{})
     
                  rx=ck.compare_dicts({'dict1':df1, 'dict2':df2})
                  if rx['return']>0: return rx

                  equal=rx['equal']
                  if equal=='yes':
                     found=True
                     break

           if found:
              ck.out('  Found!')
           else:
              # Removing point
              ck.out('    Removing point ...')

              fn=fn2[:-14] # strip the '.features.json' suffix

              for fn0 in dirList2:
                  if fn0.startswith(fn):
                     p0=os.path.join(p2,fn0)
                     os.remove(p0)

    return {'return':0}
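The pruning logic above hinges on ck.compare_dicts matching the feature files of the two experiment entries. A minimal sketch of that call, assuming the CK kernel is installed (see the CK kernel documentation for the exact matching semantics):

import ck.kernel as ck

rx = ck.compare_dicts({'dict1': {'a': 1, 'b': 2},
                       'dict2': {'a': 1, 'b': 2}})
if rx['return'] > 0: ck.err(rx)
print(rx['equal'])  # 'yes' when the dictionaries match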
def main(i):

    # Load common table file (for all models)
    ddd = {}
    r = ck.load_json_file({'json_file': 'save_all_model_data_tmp.json'})
    if r['return'] == 0:
        ddd = r['dict']

    # Iterate over compilers
    for g in gcc:
        random.seed(12345)

        ck.out(
            '********************************************************************************'
        )
        ck.out('Modeling optimizations for ' + g)
        ck.out('')

        if g not in ddd: ddd[g] = {}

        gx = g.replace(' ', '_')

        r = ck.load_json_file(
            {'json_file': 'prepare_train_data_tmp.' + gx + '.json'})
        if r['return'] > 0: ck.err(r)

        d = r['dict']

        ftable = d['ftable']
        ctable = d['ctable']

        s = ''

        for iteration in range(1, 10):
            start_time = time.time()

            # Full cross-validation
            acc = 0
            obs = 0
            wrong = 0

            acc_min = None
            acc_max = None

            s = '==============================================================\n'
            s += 'Iteration: ' + str(iteration) + '\n\n'

            if iteration == 1:
                # Default params from TF example
                hu = [10, 20, 10]
                ts = 1000
            else:
                # Generate random DNN topology and params
                hu = []
                nhu = random.randint(1, 5)

                for k in range(0, nhu):
                    x = random.randint(10, 30)
                    hu.append(x)

                ts = random.randint(1000, 3000)

            s += '  Hidden units:   ' + str(hu) + '\n'
            s += '  Training steps: ' + str(ts) + '\n\n'

            ck.out(s)

            for n in range(0, 3):  # build the model a few times (training is stochastic, results may vary slightly)

                # Building decision tree on all data
                ii = {
                    'action': 'build',
                    'module_uoa': 'model',
                    'ftable': ftable,
                    'ctable': ctable,
                    'keep_temp_files': 'yes',
                    "model_module_uoa": "model.tf",
                    "model_name": "dnn_classifier",
                    "model_file": "process_model_using_dnn_tf/" + gx,
                    "model_params": {
                        "hidden_units": hu,
                        "training_steps": ts
                    },
                    "out": ""
                }

                # Training
                cii = copy.deepcopy(ii)

                r = ck.access(ii)
                if r['return'] > 0: ck.err(r)

                # Validating
                ii = copy.deepcopy(ii)

                ii['action'] = 'validate'

                r = ck.access(ii)
                if r['return'] > 0: ck.err(r)

                obs += r['observations']
                wrong += r['mispredictions']

                acc = float(obs - wrong) / float(obs)

                x = '\n  Accuracy on all data (' + str(n + 1) + ' out of 3):   ' + str(acc) + '\n'
                s += x
                ck.out(x)

                if acc_min == None or acc < acc_min:
                    acc_min = acc

                if acc_max == None or acc > acc_max:
                    acc_max = acc

            stop_time = time.time() - start_time

            x = '\n\nIteration: ' + str(iteration) + ' ; accuracy (min/max): ' + \
                '%.2f' % acc_min + ' .. ' + '%.2f' % acc_max + '\n'
            x += '\n  Elapsed time: ' + '%.1f' % stop_time + ' sec.\n'
            s += x
            ck.out(x)

            # Cross-validating (for simplicity 1 run)
            cross_obs = 0
            cross_wrong = 0

            x = '  *************************************************\n'
            x += '  Cross-validating model (leave one out)\n\n'
            s += x
            ck.out(x)

            for bench in range(0, len(ftable)):
                train_ftable = []
                train_ctable = []
                test_ftable = []
                test_ctable = []

                for k in range(0, len(ftable)):
                    if k != bench:
                        train_ftable.append(ftable[k])
                        train_ctable.append(ctable[k])
                    else:
                        test_ftable.append(ftable[k])
                        test_ctable.append(ctable[k])

                # Selecting model
                ii = {
                    'action': 'build',
                    'module_uoa': 'model',
                    'ftable': train_ftable,
                    'ctable': train_ctable,
                    'keep_temp_files': 'no',
                    "model_module_uoa": "model.tf",
                    "model_name": "dnn_classifier",
                    "model_file": "tmp-model-tf-dnn-classifier-" + gx + '_' + str(iteration),
                    "model_params": {
                        "hidden_units": hu,
                        "training_steps": ts
                    },
                    "out": ""
                }

                # Training
                cii = copy.deepcopy(ii)

                r = ck.access(ii)
                if r['return'] > 0: ck.err(r)

                # Validating
                ii = copy.deepcopy(ii)

                ii['action'] = 'validate'
                ii['ftable'] = test_ftable
                ii['ctable'] = test_ctable
                #                ii['ftable']=ftable
                #                ii['ctable']=ctable

                r = ck.access(ii)
                if r['return'] > 0: ck.err(r)

                cross_obs += r['observations']
                cross_wrong += r['mispredictions']

                cross_acc = float(cross_obs - cross_wrong) / float(cross_obs)

                x = '\n    ' + str(bench) + ' out of ' + str(len(ftable)) + \
                    ' ) current cross-validation accuracy: ' + '%.2f' % cross_acc + '\n'
                s += x
                ck.out(x)

            stop_time = time.time() - start_time

            x = '\nIteration: ' + str(iteration) + ' ; accuracy (with cross-validation): ' + \
                '%.2f' % cross_acc + '\n'
            x += '\n  Elapsed time: ' + '%.1f' % stop_time + ' sec.\n'
            s += x

            ck.out(x)

            ddd[g]['dnn_tf_with_cross_validation_iteration_' + str(iteration) +
                   '_ft1_ft65'] = cross_acc
            ddd[g]['dnn_tf_without_cross_validation_iteration_' +
                   str(iteration) + '_ft1_ft65'] = acc_max

        r = ck.save_text_file({
            'text_file': 'process_model_using_decision_trees/log.' + gx + '.txt',
            'string': s
        })
        if r['return'] > 0: return r

        # Save common data (continuously, not to lose data)
        r = ck.save_json_to_file({
            'json_file': 'save_all_model_data_tmp.json',
            'dict': ddd
        })
        if r['return'] > 0: return r

    return {'return': 0}
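The manual leave-one-out split above can also be expressed with scikit-learn's LeaveOneOut iterator. A short equivalent sketch (an alternative formulation, not what the script itself uses; assumes scikit-learn is installed):

from sklearn.model_selection import LeaveOneOut

# ftable/ctable are parallel lists: one feature vector and one class label per benchmark
for train_idx, test_idx in LeaveOneOut().split(ftable):
    train_ftable = [ftable[k] for k in train_idx]
    train_ctable = [ctable[k] for k in train_idx]
    test_ftable = [ftable[k] for k in test_idx]
    test_ctable = [ctable[k] for k in test_idx]
    # ... train on train_*, validate on test_*, as in the loop above ...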
             if tp!=None:
                ii['mode']=tp

             r=ck.access(ii)
             if r['return']==0:
                shutil.copy(of2,of3)

             print(' * converted')

   # Record run-time state to html
   if os.path.isfile(js):
      tt=os.path.getmtime(js)
      if tjs==None or tjs!=tt:
         tjs=tt

         r=ck.load_json_file({'json_file':js})
         if r['return']==0:
            d=r['dict']

            rts=d.get('run_time_state',{})

            risx=rts.get("input_size_x",'')
            risy=rts.get("input_size_y",'')
            rfps=rts.get("run_time_fps",'')
            rtt=rts.get("run_time_total",'')
            rf=rts.get("frames",'')

            rocld=rts.get("opencl_device", '')
            roclp=rts.get("opencl_platform",'')

            roclu=rts.get("opencl_device_units",'')
Example #7
def main(i):

    # Load data
    r = ck.load_json_file(
        {'json_file': 'autotune_for_input_aware_blas_fixed.json'})
    if r['return'] > 0: return r
    ddd = r['dict']

    table = {"0": [], "1": []}

    # Find RPi3
    found = False
    for a in ddd.get('all', []):
        if a.get('cpu_name', '') == 'BCM2709':
            found = True
            break

    if not found:
        return {'return': 1, 'error': 'RPi3 data not found'}

    data = a.get('data', {})

    for N in sorted(data, key=lambda x: int(x)):
        results = data[N]

        gflops_default = None
        tmin_default = None

        gflops_best = None
        tmin_best = None

        for BS in sorted(results, key=lambda x: int(x)):
            x = results[BS]

            tmin = x['tmin']
            gflops = x['gflops']

            if BS == N:
                gflops_default = gflops
                tmin_default = tmin

                table['0'].append([int(N), gflops_default])
            else:
                if gflops_best == None or gflops > gflops_best:
                    gflops_best = gflops
                    tmin_best = tmin

        if gflops_best != None:
            table['1'].append([int(N), gflops_best])

    # Graph input
    ii = {
        "action": "plot",
        "module_uoa": "graph",
        "table": table,
        "plot_type": "mpl_2d_scatter",

        #          "title":"Powered by Collective Knowledge",
        "axis_x_desc": "Square matrix size",
        "axis_y_desc": "GFLOPS",
        "plot_grid": "no",
        "mpl_image_size_x": "12",
        "mpl_image_size_y": "6",
        "mpl_image_dpi": "100",
        "font_size": 22,

        #          "out_to_file":'process_data_and_plot_graph_out.pdf',
        "point_style": {
            "0": {
                "marker": "o"
            },
            "1": {
                "marker": "x"
            }
        }
    }

    # Save common data (continuously, not to lose data)
    r = ck.save_json_to_file({
        'json_file': 'process_data_and_plot_graph_out.json',
        'dict': ii
    })
    if r['return'] > 0: return r

    return {'return': 0}
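The CK "graph" module renders this input with matplotlib internally. A rough standalone equivalent of the scatter plot above (illustration only; assumes matplotlib is installed and table is non-empty):

import matplotlib.pyplot as plt

xs0, ys0 = zip(*table['0'])  # default tile size (BS == N)
xs1, ys1 = zip(*table['1'])  # best autotuned tile size
plt.figure(figsize=(12, 6), dpi=100)
plt.scatter(xs0, ys0, marker='o', label='default')
plt.scatter(xs1, ys1, marker='x', label='autotuned')
plt.xlabel('Square matrix size')
plt.ylabel('GFLOPS')
plt.savefig('process_data_and_plot_graph_out.pdf')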
def main(i):

    cur_dir = os.getcwd()
    fas = os.path.join(cur_dir, aggregated_stats)

    # Get some info about current platform
    ii = {'action': 'detect', 'module_uoa': 'platform', 'out': 'con'}

    r = ck.access(ii)
    if r['return'] > 0: return r

    hos = r['host_os_uid']
    hosx = r['host_os_uoa']
    hosd = r['host_os_dict']

    tos = r['os_uid']
    tosx = r['os_uoa']
    tosd = r['os_dict']

    cpu_name = r['features']['cpu']['name']
    plat_name = r['features']['platform']['name']

    #############################################################
    ck.out(line)
    ck.out('CPU name: ' + cpu_name)
    ck.out('Plat name: ' + plat_name)

    #############################################################
    ck.out(line)
    ck.out('Loading aggregated stats ...')

    aa = []
    if os.path.isfile(fas):
        r = ck.load_json_file({'json_file': fas})
        if r['return'] > 0: return r
        ax = r['dict']

        if 'all' not in ax: ax['all'] = []
        aa = ax['all']

    #############################################################
    ck.out(line)
    ck.out('Finding entry related to this platform ...')

    found = False
    for a in aa:
        if a.get('cpu_name', '') == cpu_name and a.get('plat_name',
                                                       '') == plat_name:
            found = True

    if not found:
        a = {'cpu_name': cpu_name, 'plat_name': plat_name}
        aa.append(a)

    if 'data' not in a: a['data'] = {}
    data = a.get('data', {})

    # Init pipeline
    r = ck.access({
        'action': 'pipeline',
        'module_uoa': 'program',
        'data_uoa': 'shared-matmul-c2',
        'cpu_freq': 'max',
        'gpu_freq': 'max',
        'speed': 'yes',
        'compiler_vars': {
            'USE_BLOCKED_MATMUL': 'YES'
        },
        'no_state_check': 'yes',
        'prepare': 'yes',
        'out': 'con'
    })
    if r['return'] > 0: return r

    ready = r['ready']
    if ready != 'yes':
        return {'return': 1, 'error': 'can\'t init pipeline'}

    pipeline = r

    # Compile the program once
    tpipeline = copy.deepcopy(pipeline)
    r = ck.access({
        'action': 'autotune',
        'module_uoa': 'pipeline',
        'pipeline': pipeline,
        'pipeline_update': {
            'env': {
                'CT_MATRIX_DIMENSION': 16,
                'CT_BLOCK_SIZE': 16
            }
        },
        'iterations': 1,
        'repetitions': 1,
        'out': 'con'
    })
    if r['return'] > 0: return r
    lsa = r.get('last_stat_analysis', {}).get('dict_flat', {})
    time_min = lsa.get('##characteristics#run#execution_time#min', None)
    if time_min == None or time_min == 0.0:
        return {'return': 1, 'error': 'failed to run default pipeline'}

    # data is per N size
    while True:  # autotune indefinitely until stopped manually (e.g. Ctrl-C)
        ck.out(line)

        n = random.randint(0, 3)  # Matrix size generator
        if n == 0:
            N = random.randint(4, 1024)  # Matrix size
        else:
            NX = random.randint(2, 10)
            N = 2**NX
            if n == 2: N = N - 1
            if n == 3: N = N + 1

        SN = str(N)

        if SN not in data: data[SN] = {}
        xdata = data.get(SN, {})

        tmin = xdata.get('tmin', None)
        tmax = xdata.get('tmax', None)
        gmin = xdata.get('gmin', None)
        gmax = xdata.get('gmax', None)
        best_tile = xdata.get('best_tile', None)

        for opts in range(0, 16):
            # Choose if random BS or power of two or power of two -+1
            if opts == 0:
                BS = 1
            elif opts == 1:
                BS = N
            else:
                b = random.randint(0, 3)

                if b == 0:
                    BS = random.randint(1, N)
                else:
                    B1 = math.frexp(N)[1] - 1  # floor(log2(N))
                    B2 = random.randint(0, B1)
                    BS = 2**B2                 # random power-of-two tile size

                    if b == 2 and BS > 1: BS = BS - 1
                    elif b == 3 and BS < N - 1: BS = BS + 1

            ck.out('Matrix size: ' + str(N))
            ck.out('Tile size:   ' + str(BS))

            # Run pipeline
            tpipeline = copy.deepcopy(pipeline)
            r = ck.access({
                'action': 'autotune',
                'module_uoa': 'pipeline',
                'pipeline': pipeline,
                'pipeline_update': {
                    'no_compile': 'yes',
                    'env': {
                        'CT_MATRIX_DIMENSION': N,
                        'CT_BLOCK_SIZE': BS
                    }
                },
                'iterations': 1,
                'repetitions': 3,
                'out': 'con'
            })
            if r['return'] > 0: return r

            lsa = r.get('last_stat_analysis', {}).get('dict_flat', {})
            time_min = lsa.get('##characteristics#run#execution_time#min',
                               None)

            changed = False
            if time_min != None:
                ops = 2 * (N * N * N)
                if tmin == None or time_min < tmin:
                    tmin = time_min
                    best_tile = BS
                    gmax = 1.0e-9 * ops / tmin
                    changed = True
                if tmax == None or time_min > tmax:
                    tmax = time_min
                    gmin = 1.0e-9 * ops / tmax
                    changed = True

                if changed:
                    xdata['tmin'] = tmin
                    xdata['tmax'] = tmax
                    xdata['gmin'] = gmin
                    xdata['gmax'] = gmax
                    xdata['best_tile'] = best_tile

                if opts == 0:
                    xdata['tbs1'] = time_min
                    xdata['gbs1'] = 1.0e-9 * ops / time_min
                    changed = True
                elif opts == 1:
                    xdata['tbsn'] = time_min
                    xdata['gbsn'] = 1.0e-9 * ops / time_min
                    xdata['bsn'] = N
                    changed = True

            if changed:
                ck.out(line)
                ck.out('Saving aggregated stats ...')

                r = ck.save_json_to_file({
                    'json_file': fas,
                    'dict': {
                        'all': aa
                    },
                    'sort_keys': 'yes'
                })
                if r['return'] > 0: return r

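    # Note: the while-loop above has no break condition, so the final save
    # below is never reached; stats are persisted by the in-loop save instead.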
    #############################################################
    ck.out(line)
    ck.out('Saving aggregated stats ...')

    r = ck.save_json_to_file({
        'json_file': fas,
        'dict': {
            'all': aa
        },
        'sort_keys': 'yes'
    })
    if r['return'] > 0: return r

    return {'return': 0}
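A worked example of the GFLOPS formula used in the loop above (ops = 2*N^3 for a square matrix multiply):

N = 1024
t = 2.0                    # hypothetical best measured time in seconds
ops = 2 * (N * N * N)      # multiply and add operations for an NxN matmul
gflops = 1.0e-9 * ops / t  # ~1.07 GFLOPS
print('%.2f' % gflops)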
def main(i):

    # Load common table file (for all models)
    ddd = {}
    r = ck.load_json_file({'json_file': 'save_all_model_data_tmp.json'})
    if r['return'] == 0:
        ddd = r['dict']

    # Iterate over compilers
    for g in gcc:
        ck.out(
            '********************************************************************************'
        )
        ck.out('Modeling optimizations for ' + g)
        ck.out('')

        if g not in ddd: ddd[g] = {}

        gx = g.replace(' ', '_')

        r = ck.load_json_file(
            {'json_file': 'prepare_train_data_tmp.' + gx + '.json'})
        if r['return'] > 0: ck.err(r)

        d = r['dict']

        ftable = d['ftable']
        ctable = d['ctable']

        s = ''

        for depth in range(1, 30):
            # Full cross-validation
            acc = 0
            obs = 0
            wrong = 0

            acc_min = None
            acc_max = None

            s = '==============================================================\n'
            s += 'Depth: ' + str(depth) + '\n\n'

            ck.out(s)

            for n in range(0, 3):  # build the model a few times (training is stochastic, results may vary slightly)

                # Building decision tree on all data
                ii = {
                    'action': 'build',
                    'module_uoa': 'model',
                    'ftable': ftable,
                    'ctable': ctable,
                    'keep_temp_files': 'yes',
                    "model_module_uoa": "model.sklearn",
                    "model_name": "dtc",
                    "model_file": "process_model_using_decision_trees/model-sklearn-dtc-" + gx + '-depth' + str(depth),
                    "model_params": {
                        "max_depth": depth
                    },
                    "out": ""
                }

                # Training
                cii = copy.deepcopy(ii)

                r = ck.access(ii)
                if r['return'] > 0: ck.err(r)

                # Validating
                ii = copy.deepcopy(ii)

                ii['action'] = 'validate'

                r = ck.access(ii)
                if r['return'] > 0: ck.err(r)

                obs += r['observations']
                wrong += r['mispredictions']

                acc = float(obs - wrong) / float(obs)

                x = '  Accuracy on all data (' + str(n + 1) + ' out of 3):   ' + str(acc)
                s += x
                ck.out(x)

                if acc_min == None or acc < acc_min:
                    acc_min = acc

                if acc_max == None or acc > acc_max:
                    acc_max = acc

            x = '\nDepth: ' + str(depth) + ' ; accuracy (min/max): ' + \
                '%.2f' % acc_min + ' .. ' + '%.2f' % acc_max + '\n'
            s += x
            ck.out(x)

            # Cross-validating (for simplicity 1 run)
            cross_obs = 0
            cross_wrong = 0

            x = '  *************************************************\n'
            x += '  Cross-validating model (leave one out)\n\n'
            s += x
            ck.out(x)

            for bench in range(0, len(ftable)):
                train_ftable = []
                train_ctable = []
                test_ftable = []
                test_ctable = []

                for k in range(0, len(ftable)):
                    if k != bench:
                        train_ftable.append(ftable[k])
                        train_ctable.append(ctable[k])
                    else:
                        test_ftable.append(ftable[k])
                        test_ctable.append(ctable[k])

                # Selecting model
                ii = {
                    'action': 'build',
                    'module_uoa': 'model',
                    'ftable': train_ftable,
                    'ctable': train_ctable,
                    'keep_temp_files': 'no',
                    "model_module_uoa": "model.sklearn",
                    "model_name": "dtc",
                    "model_file": "tmp-model-sklearn-dtc",
                    "model_params": {
                        "max_depth": depth
                    },
                    "out": ""
                }

                # Training
                cii = copy.deepcopy(ii)

                r = ck.access(ii)
                if r['return'] > 0: ck.err(r)

                # Validating
                ii = copy.deepcopy(ii)

                ii['action'] = 'validate'
                ii['ftable'] = test_ftable
                ii['ctable'] = test_ctable

                r = ck.access(ii)
                if r['return'] > 0: ck.err(r)

                cross_obs += r['observations']
                cross_wrong += r['mispredictions']

                cross_acc = float(cross_obs - cross_wrong) / float(cross_obs)

                x = '    ' + str(bench) + ' out of ' + str(len(ftable)) + \
                    ' ) current cross-validation accuracy: ' + '%.2f' % cross_acc
                s += x
                ck.out(x)

            x = '\nDepth: ' + str(depth) + ' ; accuracy (with cross-validation): ' + \
                '%.2f' % cross_acc + '\n'
            s += x
            ck.out(x)

            ddd[g]['decision_trees_with_cross_validation_depth_' + str(depth) +
                   '_ft1_ft65'] = cross_acc
            ddd[g]['decision_trees_without_cross_validation_depth_' +
                   str(depth) + '_ft1_ft65'] = acc_max

        r = ck.save_text_file({
            'text_file': 'process_model_using_decision_trees/log.' + gx + '.txt',
            'string': s
        })
        if r['return'] > 0: return r

        # Save common data (continuously, not to lose data)
        r = ck.save_json_to_file({
            'json_file': 'save_all_model_data_tmp.json',
            'dict': ddd
        })
        if r['return'] > 0: return r

    return {'return': 0}
def do(i):

    # List performance entries
    r = ck.access({
        'action': 'search',
        'module_uoa': 'experiment',
        'data_uoa': 'ck-request-asplos18-tvm-fpga-performance-*'
        #                 'repo_uoa':'ck-request-asplos18-results'
    })
    if r['return'] > 0: return r
    lst = r['lst']

    for q in lst:
        duid = q['data_uid']
        duoa = q['data_uoa']
        ruid = q['repo_uid']
        path = q['path']

        ck.out(duoa)

        # Search matching accuracy entry
        r = ck.access({
            'action': 'load',
            'module_uoa': 'experiment',
            'data_uoa': duid,
            'repo_uoa': ruid
        })
        if r['return'] > 0: return r

        dd = r['dict']
        ruid = r['repo_uid']
        apath = r['path']

        # Updating meta if needed
        dd['meta']['scenario_module_uoa'] = 'a555738be4b65860'  # module:request.asplos18

        dd['meta']['model_species'] = 'd41bbf1e489ab5e0'  # model.species:resnet18

        dd['meta']['dataset_species'] = 'ImageNet'  # dataset species (free format)
        dd['meta']['dataset_size'] = 2000  # number of images ...

        dd['meta']['platform_species'] = 'fpga'  # embedded vs server vs fpga (maybe other classifications such as edge)

        dd['meta']['platform_peak_power'] = 2.5  #Watts
        dd['meta']['platform_price'] = 229  # $
        dd['meta']['platform_price_date'] = '20180404'  # date

        dd['meta']['artifact'] = '9375838469ad4029'  # artifact description

        dd['meta']['model_precision'] = 'int8'

        dd['meta']['processed'] = 'yes'

        # Unified full name for some deps
        ds = dd['meta']['deps_summary']

        x = ds['model']
        r = ck.access({
            'action': 'make_deps_full_name',
            'module_uoa': 'request.asplos18',
            'deps': x
        })
        if r['return'] > 0: return r

        dd['meta']['model_design_name'] = r['full_name']
        dd['meta']['plat_name'] = 'Xilinx PYNQ-Z1 FPGA (ZYNQ XC7Z020-1CLG400C)'
        dd['meta']['os_name'] = 'Ubuntu 15.10'
        dd['meta']['cpu_name'] = 'Programmable logic equivalent to Artix-7 FPGA'

        # Updating entry
        r = ck.access({
            'action': 'update',
            'module_uoa': 'experiment',
            'data_uoa': duid,
            'repo_uoa': ruid,
            'dict': dd,
            'substitute': 'yes',
            'ignore_update': 'yes',
            'sort_keys': 'yes'
        })
        if r['return'] > 0: return r

        # Checking points to aggregate
        os.chdir(path)
        dperf = os.listdir(path)
        for f in dperf:
            if f.endswith('.cache.json'):
                os.system('git rm -f ' + f)

            elif f.endswith('.flat.json'):
                ck.out(' * ' + f)

                # Load performance file
                p1 = os.path.join(path, f)

                r = ck.load_json_file({'json_file': p1})
                if r['return'] > 0: return r
                d1 = r['dict']

                # Prune some old value
                d = {}
                for k in d1:
                    if not k.startswith('##characteristics#run#accuracy_top1') and \
                       not k.startswith('##characteristics#run#accuracy_top5') and \
                       not k.startswith('##characteristics#run#inference_throughput') and \
                       not k.startswith('##characteristics#run#inference_latency'):
                        d[k] = d1[k]

                d['##features#model_size#min'] = 129770000  # Bytes

                d['##features#gpu_freq#min'] = 100
                d['##features#cpu_freq#min'] = ''
                d['##features#freq#min'] = d['##features#gpu_freq#min']

                d['##features#processed#min'] = 'yes'

                # Add throughput (images/second)
                tall = d.get(
                    '##characteristics#run#execution_time_classify_internal#all',
                    [])  # It's internal VTA measurements
                if len(tall) > 0:
                    tnew = []
                    for t in tall:
                        t1 = 1 / t
                        tnew.append(t1)

                    r = ck.access({
                        'action': 'stat_analysis',
                        'module_uoa': 'experiment',
                        'dict': d,
                        'dict1': {
                            '##characteristics#run#inference_throughput': tnew
                        }
                    })
                    if r['return'] > 0: return r

                # Unify batch size
                batch = 1  # for now only 1 is supported in this artifact
                d['##features#batch_size#min'] = batch

                # inference latency
                d['##features#measuring_latency#min'] = 'yes'

                r = ck.access({
                    'action': 'stat_analysis',
                    'module_uoa': 'experiment',
                    'dict': d,
                    'dict1': {
                        '##characteristics#run#inference_latency': tall
                    }
                })
                if r['return'] > 0: return r

                r = ck.access({
                    'action': 'stat_analysis',
                    'module_uoa': 'experiment',
                    'dict': d,
                    'dict1': {
                        '##characteristics#run#prediction_time_avg_s': tall
                    }
                })
                if r['return'] > 0: return r

                # Add accuracy (was calculated through separate experiment)
                r = ck.access({
                    'action': 'stat_analysis',
                    'module_uoa': 'experiment',
                    'dict': d,
                    'dict1': {
                        '##characteristics#run#accuracy_top1': [accuracy_top1]
                    }
                })
                if r['return'] > 0: return r

                # Add accuracy (was calculated through separate experiment)
                r = ck.access({
                    'action': 'stat_analysis',
                    'module_uoa': 'experiment',
                    'dict': d,
                    'dict1': {
                        '##characteristics#run#accuracy_top5': [accuracy_top5]
                    }
                })
                if r['return'] > 0: return r

                # Save updated dict
                r = ck.save_json_to_file({
                    'json_file': p1,
                    'dict': d,
                    'sort_keys': 'yes'
                })
                if r['return'] > 0: return r

    return {'return': 0}
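Since str.startswith accepts a tuple of prefixes, the key-pruning loop above can be written more compactly; an equivalent sketch:

# Prefixes of old values to drop before re-running stat_analysis
drop = ('##characteristics#run#accuracy_top1',
        '##characteristics#run#accuracy_top5',
        '##characteristics#run#inference_throughput',
        '##characteristics#run#inference_latency')

d = {k: v for k, v in d1.items() if not k.startswith(drop)}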
Example #11
for q in compilers:
    s='rpi3-snapshot-'+q.lower().replace(' ','-')+'-autotuning'

    # Find entry
    r=ck.access({'action':'load',
                 'module_uoa':'slide',
                 'data_uoa':s})
    if r['return']>0: ck.err(r)
    p=r['path']
    d=r['dict']

    d1=d['slides'][0]

    p1=os.path.join(p,d1+'.json')
    
    r=ck.load_json_file({'json_file':p1})
    if r['return']>0: ck.err(r)

    d2=r['dict']

    classes[q]={'-O3':[]}
    uopts[q]={'-O3':0}
    improvements[q]={}
    individual_flags[q]={}

    for t in d2['table']:
        num=t['solution_num']
        flags=t['best_flags'].strip()

        classes[q][flags]=[]
        uopts[q][flags]=num
def main(i):

    # Load common table file (for all models)
    r = ck.load_json_file({'json_file': 'save_all_model_data_tmp.json'})
    if r['return'] > 0: return r
    d = r['dict']

    # Get all models/features
    kk = list(d['GCC 4.9.2'])

    table = []
    table_milepost = []
    table_short = []

    ikk = []
    for k in kk:
        sort = 0
        k1 = k

        j = k.find('_depth_')
        if j > 0:
            j1 = k.find('_', j + 7)
            if j1 > 0:
                sort = int(k[j + 7:j1])
                k1 = k[:j]

        j = k.find('_iteration_')
        if sort == 0 and j > 0:
            j1 = k.find('_', j + 11)
            if j1 > 0:
                sort = int(k[j + 11:j1])
                k1 = k[:j]

        ikk.append([k1, sort, k])

    for k1 in sorted(ikk, key=lambda x: (x[0], x[1])):
        k = k1[2]

        norm = False

        j = k.find('_ft')
        j1 = k.find('_normalized')
        if j1 > 0:
            norm = True
            j = j1

        km = k[:j].replace('_', ' ')
        if km.startswith('milepost'):
            ext = km[9:]
            if ext != '': ext = ' (' + ext + ')'
            km = 'milepost nn' + ext

        km = km.replace(' depth', '; depth')
        km = km.replace(' iteration', '; iteration')

        kf = k[j + 1:]
        if norm:
            kf = kf[11:] + '\\newline' + '(normalized)'
        kf = kf.replace('_', ' .. ')

        a4 = "%.2f" % d['GCC 4.9.2'][k]
        a7 = "%.2f" % d['GCC 7.1.0'][k]

        # Full table (for interactive report)
        line = [km, kf, a4, a7]
        table.append(line)

        # Shorter version for paper - ugly but didn't have time to make it nicer ;)
        # Skip most depths/iterations to keep the paper table short
        skip_terms = (
            'depth 3', 'depth 5', 'depth 6', 'depth 7', 'depth 9',
            'depth 10', 'depth 11', 'depth 12', 'depth 13', 'depth 14',
            'depth 15', 'depth 17', 'depth 18', 'depth 19', 'depth 21',
            'depth 22', 'depth 23', 'depth 24', 'depth 26', 'depth 27',
            'depth 28', 'iteration 5', 'iteration 6', 'iteration 7',
            'iteration 8', 'iteration 9')

        if not any(t in km for t in skip_terms):
            table_short.append(line)

        # Only short MILEPOST
        if km == 'milepost nn' and kf == 'ft1 .. ft56':
            table_milepost.append(line)

    dd = {
        "table_style": "border=\"1\"",
        "table_header": [{
            "name": "Model",
            "html_before": "<b>",
            "html_after": "</b>",
            "tex": "l",
            "tex_before": "\\textbf{",
            "tex_after": "}"
        }, {
            "name": "Features",
            "html_change_space": "yes",
            "tex": "p{1.2in}"
        }, {
            "name": "Accuracy (GCC 4.9.2)",
            "html_change_space": "yes",
            "tex": "p{0.9in}"
        }, {
            "name": "Accuracy (GCC 7.1.0)",
            "html_change_space": "yes",
            "tex": "p{0.9in}"
        }]
    }

    # Save full table file (for all models)
    dd['table'] = table

    r = ck.save_json_to_file({
        'json_file': 'save_all_model_data_tmp_table_full.json',
        'dict': dd,
        'sort_keys': 'yes'
    })
    if r['return'] > 0: return r

    # Save common table file (for all models)
    dd['table'] = table_short

    r = ck.save_json_to_file({
        'json_file': 'save_all_model_data_tmp_table_short.json',
        'dict': dd,
        'sort_keys': 'yes'
    })
    if r['return'] > 0: return r

    # Save common table file (for all models)
    dd['table'] = table_milepost

    r = ck.save_json_to_file({
        'json_file': 'save_all_model_data_tmp_table_milepost.json',
        'dict': dd,
        'sort_keys': 'yes'
    })
    if r['return'] > 0: return r

    return {'return': 0}
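The substring parsing of '_depth_N_' / '_iteration_N_' keys above could also be done with a regular expression; an equivalent sketch (illustration only):

import re

ikk = []
for k in kk:
    m = re.search(r'_(depth|iteration)_(\d+)_', k)
    if m:
        ikk.append([k[:m.start()], int(m.group(2)), k])
    else:
        ikk.append([k, 0, k])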
Example #13
File: comm.py  Project: jmrtin72/cbench
def access(i):
    """
    Input:  {
              (filename) [str] - load JSON from this file
                or
              (json) [str] - parse JSON string from command line (use ' instead of ")
                or
              (dict) [dict] - dictionary to send to the cK API
            }

    Output: {
              return  [int]    - return code = 0 if success or >0 if error
              (error) [str]    - error string if return>0 
            }
    """

    import json

    filename = i.get('filename', '')
    json_string = i.get('json', '')

    display = i.get('display', '')

    data = i.get('dict', {})

    if filename == '' and json_string == '' and len(data) == 0:
        return {'return': 1,
                'error': 'either "filename" or "json" or "dict" should define data to be pushed to cK API'}

    if filename != '':
        r = ck.load_json_file({'json_file': filename})
        if r['return'] > 0: return r

        data2 = r['dict']
        data.update(data2)

    if json_string != '':
        json_string = json_string.replace("'", '"')

        data2 = json.loads(json_string)

        data.update(data2)
    if display == '':
        display = False

    # Get current configuration
    r = config.load({})
    if r['return'] > 0: return r
    cfg = r['dict']

    # Prepare request
    ii = {'config': cfg}
    ii.update(data)

    # Send request to the cK API
    r = send(ii)
    if r['return'] > 0: return r

    if display:
        ck.out('Output:')
        ck.out('')

        ck.out(json.dumps(r, indent=2))

    return r
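A minimal usage sketch for access() (hypothetical action and values; the accepted actions depend on the cK API being called):

# Parse an inline JSON string (single quotes are converted to double quotes)
# and print the server response
r = access({'json': "{'action': 'test'}", 'display': 'yes'})
if r['return'] > 0:
    print(r.get('error', ''))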
Example #14
def main(i):

    # Load common table file (for all models)
    ddd = {}
    r = ck.load_json_file({'json_file': 'save_all_model_data_tmp.json'})
    if r['return'] == 0:
        ddd = r['dict']

    # Iterate over compilers
    for g in gcc:
        ck.out(
            '********************************************************************************'
        )
        ck.out('Modeling optimizations for ' + g)
        ck.out('')

        if g not in ddd: ddd[g] = {}

        gx = g.replace(' ', '_')

        r = ck.load_json_file(
            {'json_file': 'prepare_train_data_tmp.' + gx + '.json'})
        if r['return'] > 0: ck.err(r)

        d = r['dict']

        ftable = d['ftable']
        ctable = d['ctable']

        # Normalize (all features 0..1)
        ftable_range = {}
        for f in ftable:
            for k in range(0, 121):
                v = f[k]
                if k not in ftable_range:
                    ftable_range[k] = {'min': None, 'max': None}
                if ftable_range[k]['min'] == None or v < ftable_range[k]['min']:
                    ftable_range[k]['min'] = v
                if ftable_range[k]['max'] == None or v > ftable_range[k]['max']:
                    ftable_range[k]['max'] = v

        ftable_normalized = []
        for f in ftable:
            x = []
            for k in range(0, 121):
                v = 0
                if ftable_range[k]['max'] != 0:
                    v = f[k] / ftable_range[k]['max']
                x.append(v)
            ftable_normalized.append(x)

        features_mask = []
        for f in range(0, 121):
            features_mask.append(1)

        r = model({
            'ftable': ftable_normalized,
            'features_mask': features_mask,
            'ctable': ctable
        })
        if r['return'] > 0: return r

        r1 = ck.save_json_to_file({
            'json_file': 'process_model_using_nearest_neighbour_reduce_features/prepare_reactions_model_train_ref_result.' + gx + '.json',
            'dict': r
        })
        if r1['return'] > 0: ck.err(r1)

        ref_acc = r['accuracy']  # Reference accuracy

        x = 'Reference accuracy: ' + str(ref_acc)
        s = x + '\n\n'

        ck.out('---------------------')
        ck.out(x)
        ck.out('')

        # Calculating Euclidean distance as in our MILEPOST GCC paper: https://hal.inria.fr/hal-00685276
        # MILEPOST features: https://github.com/ctuning/ck-autotuning/blob/master/module/program.static.features/.cm/meta.json
        # 0..55 - original MILEPOST features
        # 56..64 - added by Jeremy Singer
        # 65..121 - 0..55/ft24 (normalized by total number of instructions)

        for k in range(0, 121):
            features_mask[k] = 0

            r = model({
                'ftable': ftable_normalized,
                'features_mask': features_mask,
                'ctable': ctable,
                'skip_out': 'yes'
            })
            if r['return'] > 0: return r

            acc = r['accuracy']

            keep = False
            sx = ''
            if acc < ref_acc:
                keep = True
                sx = 'kept'
            elif acc == ref_acc:
                sx = 'removed'
            elif acc > ref_acc:
                ref_acc = acc
                sx = 'removed (accuracy even improved)'

            if keep:
                features_mask[k] = 1

            x = 'ft' + str(k + 1) + ') ' + str(acc) + ' ' + sx + ' (ref acc=' + str(ref_acc) + ')'
            ck.out(x)
            s += x + '\n'

        # Final accuracy
        r = model({
            'ftable': ftable_normalized,
            'features_mask': features_mask,
            'ctable': ctable,
            'skip_out': 'yes'
        })
        if r['return'] > 0: return r

        acc = r['accuracy']

        r1 = ck.save_json_to_file({
            'json_file': 'process_model_using_nearest_neighbour_reduce_features/prepare_reactions_model_train_reduced_result.' + gx + '.json',
            'dict': r
        })
        if r1['return'] > 0: ck.err(r1)

        # Final result
        ck.out('')
        ck.out('Final features mask:')
        ck.out('')

        s += '\nFinal features mask:\n\n'
        s1 = ''

        for f in range(0, len(features_mask)):
            x = '  ft' + str(f + 1) + ') ' + str(features_mask[f])
            ck.out(x)
            s += x + '\n'

            if features_mask[f] == 1:
                if s1 != '': s1 += ','
                s1 += 'ft' + str(f + 1)

        s += '\nFinal accuracy: ' + str(acc) + '\n'

        r = ck.save_text_file({
            'text_file': 'process_model_using_nearest_neighbour_reduce_features/log.' + gx + '.txt',
            'string': s
        })
        if r['return'] > 0: return r

        r = ck.save_text_file({
            'text_file': 'process_model_using_nearest_neighbour_reduce_features/influential_features.' + gx + '.txt',
            'string': s1
        })
        if r['return'] > 0: return r

        ddd[g]['milepost_reduce_complexity2_normalized_ft1_ft121'] = acc

    # Save common data
    r = ck.save_json_to_file({
        'json_file': 'save_all_model_data_tmp.json',
        'dict': ddd
    })
    if r['return'] > 0: return r

    return {'return': 0}
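
# Note: the model() helper called by these scripts is defined elsewhere and is
# not part of this snippet. A minimal sketch of a leave-one-out nearest-
# neighbour classifier, consistent with how model() is called here
# (ftable/ctable/features_mask/ft_start/ft_stop/skip_out), might look as
# follows; the real implementation may differ:
def model_sketch(i):
    import math

    ftable = i['ftable']    # feature vectors, one per program
    ctable = i['ctable']    # class per program (e.g. best optimization found)

    ft_start = i.get('ft_start', 0)
    ft_stop = i.get('ft_stop', len(ftable[0]))

    mask = i.get('features_mask')
    if mask is None:
        mask = [1] * len(ftable[0])

    # Leave-one-out 1-NN over the masked Euclidean distance
    correct = 0
    for j in range(len(ftable)):
        best = None
        best_dist = None
        for j2 in range(len(ftable)):
            if j2 == j: continue
            d = 0.0
            for k in range(ft_start, ft_stop):
                if mask[k] == 1:
                    d += (ftable[j][k] - ftable[j2][k]) ** 2
            d = math.sqrt(d)
            if best_dist is None or d < best_dist:
                best_dist = d
                best = j2
        if best is not None and ctable[best] == ctable[j]:
            correct += 1

    acc = float(correct) / float(len(ftable))

    if i.get('skip_out', '') != 'yes':
        ck.out('Accuracy: ' + str(acc))

    return {'return': 0, 'accuracy': acc}
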
def main(i):

    cur_dir = os.getcwd()
    fas = os.path.join(cur_dir, aggregated_stats)

    # Get some info about current platform
    ii = {'action': 'detect', 'module_uoa': 'platform', 'out': 'con'}

    r = ck.access(ii)
    if r['return'] > 0: return r

    hos = r['host_os_uid']
    hosx = r['host_os_uoa']
    hosd = r['host_os_dict']

    tos = r['os_uid']
    tosx = r['os_uoa']
    tosd = r['os_dict']

    cpu_name = r['features']['cpu']['name']
    plat_name = r['features']['platform']['name']

    #############################################################
    ck.out(line)
    ck.out('CPU name: ' + cpu_name)
    ck.out('Plat name: ' + plat_name)

    #############################################################
    ck.out(line)
    ck.out('Loading aggregated stats ...')

    aa = []
    if os.path.isfile(fas):
        r = ck.load_json_file({'json_file': fas})
        if r['return'] > 0: return r
        ax = r['dict']

        if 'all' not in ax: ax['all'] = []
        aa = ax['all']

    #############################################################
    ck.out(line)
    ck.out('Finding entry related to this platform ...')

    found = False
    for a in aa:
        if a.get('cpu_name', '') == cpu_name and a.get('plat_name', '') == plat_name:
            found = True
            break

    if not found:
        a = {'cpu_name': cpu_name, 'plat_name': plat_name}
        aa.append(a)

    if 'data' not in a: a['data'] = {}
    data = a.get('data', {})

    # Init pipeline
    r = ck.access({
        'action': 'pipeline',
        'module_uoa': 'program',
        'data_uoa': 'shared-matmul-c2',
        'cpu_freq': 'max',
        'gpu_freq': 'max',
        'speed': 'yes',
        'compiler_vars': {
            'USE_BLOCKED_MATMUL': 'YES'
        },
        'no_state_check': 'yes',
        'prepare': 'yes',
        'out': 'con'
    })
    if r['return'] > 0: return r

    ready = r['ready']
    if ready != 'yes':
        return {'return': 1, 'error': 'can\'t init pipeline'}

    pipeline = r

    # Compile the program once
    tpipeline = copy.deepcopy(pipeline)
    r = ck.access({
        'action': 'autotune',
        'module_uoa': 'pipeline',
        'pipeline': pipeline,
        'pipeline_update': {
            'env': {
                'CT_MATRIX_DIMENSION': 32,
                'CT_BLOCK_SIZE': 32
            }
        },
        'iterations': 1,
        'repetitions': 1,
        'out': 'con'
    })
    if r['return'] > 0: return r
    lsa = r.get('last_stat_analysis', {}).get('dict_flat', {})
    time_min = lsa.get('##characteristics#run#execution_time#min', None)
    if time_min == None or time_min == 0.0:
        return {'return': 1, 'error': 'failed to run default pipeline'}

    # data is per N size
    for nn in range(-1, 2):
        for n in range(3, 32):
            N = (2**n) + nn

            SN = str(N)

            if SN not in data: data[SN] = {}
            xdata = data.get(SN, {})

            tmin = xdata.get('tmin', None)
            tmax = xdata.get('tmax', None)
            gmin = xdata.get('gmin', None)
            gmax = xdata.get('gmax', None)
            best_tile = xdata.get('best_tile', None)

            for opts in range(0, 16):
                # Choose BS: no blocking (1), the full matrix (N), or a power of two
                if opts == 0:
                    BS = 1
                elif opts == 1:
                    BS = N
                else:
                    BS = 2**(opts - 1)
                    if BS > N: continue

                ck.out('Matrix size: ' + str(N))
                ck.out('Tile size:   ' + str(BS))

                # Run pipeline
                tpipeline = copy.deepcopy(pipeline)
                r = ck.access({
                    'action': 'autotune',
                    'module_uoa': 'pipeline',
                    'pipeline': pipeline,
                    'pipeline_update': {
                        'no_compile': 'yes',
                        'env': {
                            'CT_MATRIX_DIMENSION': N,
                            'CT_BLOCK_SIZE': BS
                        }
                    },
                    'iterations': 1,
                    'repetitions': 3,
                    'out': 'con'
                })
                if r['return'] > 0: return r

                lsa = r.get('last_stat_analysis', {}).get('dict_flat', {})
                tmin = lsa.get('##characteristics#run#execution_time#min',
                               None)

                changed = False
                if tmin != None:
                    ops = 2 * (N * N * N)
                    gflops = 1.0e-9 * ops / tmin
                    SBS = str(BS)

                    xdata[SBS] = {'tmin': tmin, 'gflops': gflops}

                    ck.out(line)
                    ck.out('Saving aggregated stats ...')

                    r = ck.save_json_to_file({
                        'json_file': fas,
                        'dict': {
                            'all': aa
                        },
                        'sort_keys': 'yes'
                    })
                    if r['return'] > 0: return r

    #############################################################
    ck.out(line)
    ck.out('Saving aggregated stats ...')

    r = ck.save_json_to_file({
        'json_file': fas,
        'dict': {
            'all': aa
        },
        'sort_keys': 'yes'
    })
    if r['return'] > 0: return r

    return {'return': 0}
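
# Worked example of the GFLOPS formula used in the loop above: a dense
# N x N matmul is counted as ops = 2*N^3 floating-point operations
# (multiply + add). The values below are illustrative.
def gflops_example(N=1024, tmin=0.5):
    ops = 2 * (N * N * N)           # 2 * 1024**3 ~= 2.147e9 ops
    return 1.0e-9 * ops / tmin      # ~= 4.29 GFLOPS for these values
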
Example #16
def process_web_request(i):
    """

  Input:  {
            http - Python http object
          }

  Output: { None }
  """

    from . import solution

    # http object
    http = i['http']

    # Parse GET variables and path
    xget = {}
    xpath = {
        'host': '',
        'port': '',
        'first': '',
        'rest': '',
        'query': ''
    }  # May be used in the future

    xt = 'json'

    xpath['host'] = i.get('host', '')
    xpath['port'] = i.get('port', '')

    # Check GET variables
    if http.path != '':
        http.send_response(200)

        a = urlparse.urlparse(http.path)
        xp = a.path
        xr = ''

        if xp.startswith('/'): xp = xp[1:]

        u = xp.find('/')
        if u >= 0:
            xr = xp[u + 1:]
            xp = xp[:u]

        xt = xp

        xpath['first'] = xp
        xpath['rest'] = xr
        xpath['query'] = a.query
        b = urlparse.parse_qs(
            a.query,
            keep_blank_values=True,
        )

        xget = {}
        for k in b:
            xget[k] = urlunquote(b[k][0])
            if sys.version_info[0] < 3:
                xget[k] = xget[k].decode('utf8')

    # Check POST
    xpost = {}
    xpost1 = {}

    try:
        headers = http.headers
        content_type = headers.get('content-type')
        ctype = ''
        if content_type != None:
            ctype, pdict = cgi.parse_header(content_type)
            # Python3 cgi.parse_multipart expects boundary to be bytes, not str.
            if sys.version_info[0] < 3 and 'boundary' in pdict:
                pdict['boundary'] = pdict['boundary'].encode()

        if ctype == 'multipart/form-data':
            if sys.version_info[0] < 3:
                xpost1 = cgi.parse_multipart(http.rfile, pdict)
            else:
                xxpost1 = cgi.FieldStorage(fp=http.rfile,
                                           headers=headers,
                                           environ={'REQUEST_METHOD': 'POST'})
                for k in xxpost1.keys():
                    xpost1[k] = [xxpost1[k].value]
        elif ctype == 'application/x-www-form-urlencoded':
            length = int(http.headers.get('content-length'))
            s = http.rfile.read(length)
            if sys.version_info[0] > 2: s = s.decode('utf8')
            xpost1 = cgi.parse_qs(s, keep_blank_values=1)

    except Exception as e:
        berr = format(e).encode('utf8')
        web_err({'http': http, 'type': xt, 'bin': berr})
        ck.out(ck.cfg['error'] + format(e))
        return

    # Post processing
    for k in xpost1:
        v = xpost1[k]
        if k.endswith('[]'):
            k1 = k[:-2]
            xpost[k1] = []
            for l in v:
                xpost[k1].append(urlunquote(l))
        else:
            if k != 'file_content':
                xpost[k] = urlunquote(v[0])
            else:
                xpost[k] = v[0]

        if k == 'file_content':
            fcrt = xpost1.get('file_content_record_to_tmp', '')
            if (type(fcrt) == list and len(fcrt) > 0
                    and fcrt[0] == 'yes') or fcrt == 'yes':
                fd, fn = tempfile.mkstemp(
                    suffix='.tmp', prefix='ck-'
                )  # suffix is important - CK will delete such file!
                os.close(fd)

                f = open(fn, 'wb')
                f.write(xpost[k])
                f.close()

                xpost[k + '_uploaded'] = fn
                del (xpost[k])
                k += '_uploaded'
            else:
                import base64
                xpost[k + '_base64'] = base64.urlsafe_b64encode(
                    xpost[k]).decode('utf8')
                del (xpost[k])
                k += '_base64'

        if sys.version_info[0] < 3:
            xpost[k] = xpost[k].decode('utf8')

    # Prepare input and check if CK json present
    ii = xget
    ii.update(xpost)

    act = ii.get('action', '')

    # Generate tmp file (to output images for example)
    fd, fn = tempfile.mkstemp(
        suffix='.tmp',
        prefix='ck-')  # suffix is important - CK will delete such file!
    os.close(fd)
    if os.path.isfile(fn): os.remove(fn)

    # Get tmp dir
    p = tempfile.gettempdir()

    # Execute command *********************************************************
    ck.out('***************************************************************')
    ck.out('Received action request: ' + act)
    if act == 'get_host_platform_info':
        r = ck.access({'action': 'detect', 'module_uoa': 'platform'})
        if r['return'] > 0:
            # Process error properly
            web_err({
                'http': http,
                'type': xt,
                'bin': r['error'].encode('utf8')
            })
            return

        s = json.dumps(r, indent=2, sort_keys=True)
        web_out({'http': http, 'type': 'json', 'bin': s.encode('utf8')})

        return
    #############################################################################################################3
    elif act == 'run_program':

        data_id = ii.get('data_id', '')

        r = solution.run({'uid': data_id})

        # start program
        #    r=ck.access({'action':'run',
        #          'module_uoa':'program',
        #          'data_uoa':ii.get('program_name',''),
        #          'cmd_key': 'use_continuous',
        #          'deps.python': 'a699c0c7de43a121',
        #          'quiet': 'yes'})

        if r['return'] > 0:
            # Process error properly
            web_err({
                'http': http,
                'type': xt,
                'bin': r['error'].encode('utf8')
            })
            return

        status = {'status': True}
        s = json.dumps(status, indent=4, sort_keys=True)
        web_out({'http': http, 'type': 'json', 'bin': s.encode('utf8')})

        return

    elif act == 'benchmark_program':

        data_id = ii.get('data_id', '')

        r = solution.benchmark({'uid': data_id})

        if r['return'] > 0:
            # Process error properly
            web_err({
                'http': http,
                'type': xt,
                'bin': r['error'].encode('utf8')
            })
            return

        status = {'status': True}
        s = json.dumps(status, indent=4, sort_keys=True)
        web_out({'http': http, 'type': 'json', 'bin': s.encode('utf8')})
        return

    elif act == 'publish_result':

        data_id = ii.get('data_id', '')

        r = solution.publish_result({'uid': data_id})

        if r['return'] > 0:
            # Process error properly
            web_err({
                'http': http,
                'type': xt,
                'bin': r['error'].encode('utf8')
            })
            return

        status = {'status': True}
        s = json.dumps(status, indent=4, sort_keys=True)
        web_out({'http': http, 'type': 'json', 'bin': s.encode('utf8')})
        return

    #############################################################################################################3
    elif act == 'get_program_result_image':

        data_id = ii['data_id']
        program_name = ii['program_name']

        jpeg = ii.get('jpeg', '')

        ck_entry = program_name.split(':')

        # Find solution
        r = ck.access({
            'action': 'load',
            'module_uoa': 'cr-solution',
            'data_uoa': data_id
        })
        if r['return'] > 0:
            # Process error properly
            web_err({
                'http': http,
                'type': xt,
                'bin': r['error'].encode('utf8')
            })
            return

        p = r['path']

        meta = r['dict']
        workflow_output_dir = meta.get('workflow_output_dir', '')

        workflow_repo = meta.get('workflow_repo_url', '')
        j = workflow_repo.rfind('/')
        if j > 0:
            workflow_repo = workflow_repo[j + 1:]

        cur_dir = os.path.join(p, 'CK', workflow_repo, ck_entry[0],
                               ck_entry[1])
        if workflow_output_dir != '':
            cur_dir = os.path.join(cur_dir, workflow_output_dir)

        # find the penultimate image provided
        try:
            st = False
            filepath = ''
            filepath_buf = ''

            found_files = []

            ck.out('')
            ck.out('Checking for output files in directory:')
            ck.out('  ' + cur_dir)
            ck.out('')

            sorted_list = sorted(os.listdir(cur_dir))
            for file in sorted_list:
                if file.endswith(".png") and file.startswith("boxed_"):
                    found_files.append(file)
                    if len(found_files) == 3:
                        break
        except:
            err = 'no files available'
            web_err({'http': http, 'type': xt, 'bin': err.encode('utf8')})
            return

        if len(found_files) == 0:
            err = 'no files available'
            web_err({'http': http, 'type': xt, 'bin': err.encode('utf8')})
            return

        if len(found_files) == 1:
            filepath = ''
            filepath_buf = found_files[0]
        elif len(found_files) == 2:
            filepath = ''
            filepath_buf = found_files[1]
        elif len(found_files) == 3:
            filepath = found_files[0]
            filepath_buf = found_files[1]

        # Check if convert to jpeg
        file_type = 'png'
        pinp = os.path.join(cur_dir, filepath_buf)

        if jpeg == 'yes':
            quality = ii.get('jpeg_quality', '')
            if quality == None or quality == '': quality = '70'

            pout = os.path.join(cur_dir, filepath_buf + '.jpg')

            s = 'convert -quality ' + quality + ' ' + pinp + ' ' + pout

            ck.out('')
            ck.out('  Converting to jpeg: ' + s)

            os.system(s)

            pinp = pout
            filepath_buf += '.jpg'
            file_type = 'jpg'
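            # e.g. with the default quality this runs a command like:
            #   convert -quality 70 .../boxed_00000001.png .../boxed_00000001.png.jpg
            # (requires ImageMagick's 'convert' in PATH; file names are illustrative)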

        # When three output files exist, the first is deleted and the second served
        ck.out('  Loading file ' + filepath_buf)
        r = ck.load_text_file({'text_file': pinp, 'keep_as_bin': 'yes'})

        if jpeg == 'yes':
            if os.path.isfile(pinp):
                os.remove(pinp)

        # Remove first
        if filepath != '':
            ck.out('  Trying to delete file ' + filepath)
            x = os.path.join(cur_dir, filepath)
            if os.path.isfile(x):
                os.remove(x)

        # Then finish checking previous one
        if r['return'] > 0:
            bout = r['error'].encode('utf-8')
        else:
            bout = r['bin']

        web_out({'http': http, 'type': file_type, 'bin': bout})

        return

    #############################################################################################################3
    elif act == 'process_webcam':

        data_id = ii['data_id']
        program_name = ii['program_name']

        ck_entry = program_name.split(':')

        # Find solution
        r = ck.access({
            'action': 'load',
            'module_uoa': 'cr-solution',
            'data_uoa': data_id
        })
        if r['return'] > 0:
            # Process error properly
            web_err({
                'http': http,
                'type': xt,
                'bin': r['error'].encode('utf8')
            })
            return

        pp = r['path']  # Path to solution!

        meta = r['dict']

        # Find workflow output path
        workflow_input_dir = meta.get('workflow_input_dir', '')
        workflow_output_dir = meta.get('workflow_output_dir', '')
        workflow_repo = meta.get('workflow_repo_url', '')

        j = workflow_repo.rfind('/')
        if j > 0:
            workflow_repo = workflow_repo[j + 1:]

        workflow_dir = os.path.join(pp, 'CK', workflow_repo, ck_entry[0],
                                    ck_entry[1])

        if workflow_input_dir != '':
            p = os.path.join(workflow_dir, workflow_input_dir)
        else:
            p = os.path.join(workflow_dir, "tmp", "input")

        if not os.path.isdir(p): os.makedirs(p)

        if workflow_output_dir != '':
            pout = os.path.join(workflow_dir, workflow_output_dir)
        else:
            pout = os.path.join(workflow_dir, "tmp")

        if not os.path.isdir(pout): os.makedirs(pout)

        # Record image
        image_uri = xpost.get('image_uri', '')

        x = 'data:image/jpeg;base64,'
        if image_uri.startswith(x):
            image64 = image_uri[len(x):]
        else:
            image64 = image_uri

        # Finding last file and incrementing
        ff = 'cr-stream-'

        inum = 0
        ffound = ''
        for f in os.listdir(p):
            if f.startswith(ff) and f.endswith('.jpg'):
                j = f.find('.')
                num = f[len(ff):j]
                if int(num) > inum:
                    inum = int(num)
                    ffound = f

        # If a file is already queued, skip this request (otherwise many parallel
        # requests pile up). When the program starts, it should clean its
        # input/output directories so that this code can continue processing images.
        if (inum > 0):
            time.sleep(1)
            ss = 'request skipped because there is already file in queue'
            ck.out('  Warning: ' + ss + ' (' + os.path.join(p, ffound) +
                   ') ...')
            s = '{"return":16, "error":"' + ss + '"}'
            web_out({'http': http, 'type': 'json', 'bin': s.encode('utf8')})
            return

        # Otherwise continue processing ...
        if inum == 0:
            inum += 1
            sinum = str(inum)
            filename = ff + ('0' * (8 - len(sinum))) + sinum

            filename2 = filename + '.jpg'
            pf = os.path.join(p, filename2)

            r = ck.convert_upload_string_to_file({
                'file_content_base64': image64,
                'filename': pf
            })
            if r['return'] > 0: return r

            ck.out(
                '  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
            )
            ck.out('  Recorded external image to ' + pf)

            # Need extra converting
            pp1 = os.path.join(pp, 'support-script-convert.sh')
            if os.path.isfile(pp1):
                ck.out('')
                ck.out('Extra image processing ...')
                ck.out('')

                extra_cmd = 'cd "' + p + '"\n'
                extra_cmd += '. "' + pp1 + '" ' + filename2 + '\n'

                r = solution.run({'uid': data_id, 'cmd': extra_cmd})
                if r['return'] > 0:
                    # Process error properly
                    web_err({
                        'http': http,
                        'type': xt,
                        'bin': r['error'].encode('utf8')
                    })
                    return

        else:
            sinum = str(inum)
            filename = ff + ('0' * (8 - len(sinum))) + sinum

            filename2 = filename + '.jpg'
            pf = os.path.join(p, filename2)

        # Need extra pushing
        pp1 = os.path.join(pp, 'support-script-push.sh')
        if os.path.isfile(pp1):
            ck.out('')
            ck.out('Extra image pushing to device ...')
            ck.out('')

            extra_cmd = 'cd "' + p + '"\n'
            extra_cmd += '. "' + pp1 + '" ' + filename + '\n'

            r = solution.run({'uid': data_id, 'cmd': extra_cmd})
            if r['return'] > 0:
                # Process error properly
                web_err({
                    'http': http,
                    'type': xt,
                    'bin': r['error'].encode('utf8')
                })
                return

        # For Android-like devices, the file may need to be pulled via a script
        ppull = os.path.join(pp, 'support-script-pull.sh')

        # Waiting for output file
        poutf = os.path.join(pout, filename + '.json')

        if not os.path.isfile(poutf):
            ck.out('Waiting for output file: ' + poutf)

        while not os.path.isfile(poutf):
            # Check if need to pull
            if os.path.isfile(ppull):
                ck.out('Trying to pull from device ...')

                extra_cmd = 'cd "' + pout + '"\n'
                extra_cmd += 'export CODEREEF_SOLUTION_PATH="' + pp + '"\n'
                extra_cmd += '. "' + ppull + '" ' + filename + '\n'

                r = solution.run({'uid': data_id, 'cmd': extra_cmd})
                if r['return'] > 0:
                    # Process error properly
                    web_err({
                        'http': http,
                        'type': xt,
                        'bin': r['error'].encode('utf8')
                    })
                    return

            time.sleep(0.1)

        ck.out('')
        ck.out('Found solution!')
        ck.out('')

        with open(poutf) as json_file:
            result = json.load(json_file)
            ck.out(json.dumps(result, indent=2))

        if os.path.isfile(poutf):
            os.remove(poutf)

        if inum == 1 and os.path.isfile(pf):
            ck.out('  REMOVING ' + pf)
            os.remove(pf)

        ck.out('')

        s = json.dumps(result, indent=4, sort_keys=True)
        web_out({'http': http, 'type': 'json', 'bin': s.encode('utf8')})

        return

    #############################################################################################################3
    elif act == 'get_image':
        num = ii.get('num', '')
        inum = int(num)
        sinum = str(inum)

        # Finding last file and incrementing
        ff = 'cr-stream-'
        pf = os.path.join(p, ff + ('0' * (8 - len(sinum))) + sinum + '.jpg')

        ck.out('  Loaded file ' + pf)

        r = ck.load_text_file({'text_file': pf, 'keep_as_bin': 'yes'})
        if r['return'] > 0:
            bout = r['error'].encode('utf-8')
        else:
            bout = r['bin']

        web_out({'http': http, 'type': 'jpeg', 'bin': bout})

        return

    #############################################################################################################3
    elif act == 'get_result':

        data_id = ii['data_id']

        # Find solution
        r = ck.access({
            'action': 'load',
            'module_uoa': 'cr-solution',
            'data_uoa': data_id
        })
        if r['return'] > 0:
            # Process error properly
            web_err({
                'http': http,
                'type': xt,
                'bin': r['error'].encode('utf8')
            })
            return

        pp = r['path']  # Path to solution!

        meta = r['dict']

        program_name = meta.get('workflow', '')
        ck_entry = program_name.split(':')

        # Find workflow output path
        result_file = meta.get('result_file', '')
        workflow_repo = meta.get('workflow_repo_url', '')

        j = workflow_repo.rfind('/')
        if j > 0:
            workflow_repo = workflow_repo[j + 1:]

        workflow_dir = os.path.join(pp, 'CK', workflow_repo, ck_entry[0],
                                    ck_entry[1])

        if result_file != '':
            pout = os.path.join(workflow_dir, result_file)
        else:
            pout = os.path.join(workflow_dir, "tmp", "tmp-ck-timer.json")

        # if not os.path.isdir(pout):  os.makedirs(pout)

        # For Android-like devices, the file may need to be pulled via a script
        ppull = os.path.join(pp, 'support-script-pull.sh')

        # Waiting for output file
        if not os.path.isfile(pout):
            ck.out('Waiting for output file: ' + pout)

        while not os.path.isfile(pout):
            # Check if need to pull
            if os.path.isfile(ppull):
                ck.out('Trying to pull from device ...')

                extra_cmd = 'cd "' + pout + '"\n'
                extra_cmd += 'export CODEREEF_SOLUTION_PATH="' + pp + '"\n'
                extra_cmd += '. "' + ppull + '" ' + filename + '\n'

                r = solution.run({'uid': data_id, 'cmd': extra_cmd})
                if r['return'] > 0:
                    # Process error properly
                    web_err({
                        'http': http,
                        'type': xt,
                        'bin': r['error'].encode('utf8')
                    })
                    return

            time.sleep(0.1)

        ck.out('')
        ck.out('Found solution!')
        ck.out('')

        rx = ck.load_json_file({'json_file': pout})
        if rx['return'] > 0: return rx

        rx = ck.flatten_dict(rx)
        if rx['return'] > 0: return rx

        rdf = rx['dict']
        crdf = {}

        # Remove first ## (do not need here)
        for k in rdf:
            v = rdf[k]
            if k.startswith('##'): k = k[2:]
            crdf[k] = v
        ck.out(json.dumps(crdf, indent=2))

        # if os.path.isfile(pout):
        #   os.remove(pout)

        # if inum==1 and os.path.isfile(pf):
        # ck.out('  REMOVING '+pf)
        # os.remove(pf)

        ck.out('')

        s = json.dumps(crdf, indent=4, sort_keys=True)
        web_out({'http': http, 'type': 'json', 'bin': s.encode('utf8')})

        return

    elif act == 'get_status':

        data_id = ii['data_id']

        # Find solution
        r = ck.access({
            'action': 'load',
            'module_uoa': 'cr-solution',
            'data_uoa': data_id
        })
        if r['return'] > 0:
            # Process error properly
            web_err({
                'http': http,
                'type': xt,
                'bin': r['error'].encode('utf8')
            })
            return

        pp = r['path']  # Path to solution!
        tmp_solStatus = os.path.join(pp, "tmp", "status.json")

        rx = ck.load_json_file({'json_file': tmp_solStatus})
        if rx['return'] > 0: return rx

        ck.out(json.dumps(rx, indent=2))

        rdf = rx['dict']

        ck.out('')

        s = json.dumps(rdf, indent=4, sort_keys=True)
        web_out({'http': http, 'type': 'json', 'bin': s.encode('utf8')})

        return

    #############################################################################################################3
    elif act == 'heartbit':

        locdir = os.path.dirname(os.path.realpath(__file__))
        ck.out('  Local directory: ' + locdir)

        # Finding last file and incrementing
        pf = os.path.join(locdir, 'static/favicon.ico')

        ck.out('  Loaded file ' + pf)

        r = ck.load_text_file({'text_file': pf, 'keep_as_bin': 'yes'})
        if r['return'] > 0:
            bout = r['error'].encode('utf-8')
        else:
            bout = r['bin']

        web_out({'http': http, 'type': 'jpeg', 'bin': bout})

        return

        # Process output (generic fall-through handling; not reached by the
        # actions above, which all return directly)
        if r['return'] > 0:
            if os.path.isfile(fn): os.remove(fn)

            bout = r['error']

            try:
                bout = bout.encode('utf-8')
            except Exception as e:
                pass

            web_err({'http': http, 'type': xt, 'bin': bout})
            return

        # If json or web
        # Try to load output file
        if not os.path.isfile(fn):
            web_err({
                'http':
                http,
                'type':
                xt,
                'bin':
                b'Output file was not created, see output (' +
                r['std'].encode('utf8') + b')!'
            })
            return

        r = ck.load_text_file({'text_file': fn, 'keep_as_bin': 'yes'})
        if r['return'] > 0:
            bout = r['error']

            try:
                bout = bout.encode('utf-8')
            except Exception as e:
                pass

            web_err({'http': http, 'type': xt, 'bin': bout})

            return

        bin = r['bin']

        # Process JSON output from file
        fx = ''

        if sys.version_info[0] > 2: bin = bin.decode('utf-8')

        ru = ck.convert_json_str_to_dict({
            'str': bin,
            'skip_quote_replacement': 'yes'
        })
        if ru['return'] > 0:
            bout = ru['error']

            try:
                bout = bout.encode('utf-8')
            except Exception as e:
                pass

            web_err({'http': http, 'type': xt, 'bin': bout})

            return

        rr = ru['dict']
        if rr['return'] > 0:
            bout = rr['error']

            try:
                bout = bout.encode('utf-8')
            except Exception as e:
                pass

            web_err({'http': http, 'type': xt, 'bin': bout})
            return

        # Check if file was returned
        fr = False

        if 'file_content_base64' in rr and rr.get('filename', '') != '':
            fr = True

        # Check if download
        if (xt == 'web' and fr) or (act == 'pull' and xt != 'json'):
            import base64
            x = rr.get('file_content_base64', '')

            fx = rr.get('filename', '')
            if fx == '': fx = ck.cfg['default_archive_name']

            # Fixing Python bug
            if sys.version_info[0] == 3 and sys.version_info[1] < 3:
                x = x.encode('utf-8')
            else:
                x = str(x)
            bin = base64.urlsafe_b64decode(
                x)  # convert from unicode to str since base64 works on strings
            # should be safe in Python 2.x and 3.x

            # Process extension
            fn1, fne = os.path.splitext(fx)
            if fne.startswith('.'): fne = fne[1:]
            if fne != '': xt = fne
            else: xt = 'unknown'
        else:
            # Check and output html
            if rr.get('html', '') != '':
                bin = rr['html'].encode('utf-8')
            else:
                if sys.version_info[0] > 2:  # Unknown output
                    bin = bin.encode('utf-8')

        web_out({'http': http, 'type': xt, 'bin': bin, 'filename': fx})

        return {'return': 0}
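
# Hypothetical client-side calls for the dispatcher above (host/port are
# illustrative; the first path component selects the output type, e.g. 'json'):
#   curl "http://localhost:4444/json?action=get_host_platform_info"
#   curl "http://localhost:4444/json?action=run_program&data_id=<solution-uid>"
#   curl "http://localhost:4444/json?action=get_status&data_id=<solution-uid>"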
          dd['##characteristics#gpu_copy_is_much_better_cpu#min']=False

       if (fcpu/fgpu_only)>1.07:
          dd['##characteristics#gpu_only_is_much_better_cpu#min']=True
       else:
          dd['##characteristics#gpu_only_is_much_better_cpu#min']=False

    return {'return':0, 'changed':changed, 'dict':dd}
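
# The 1.07 thresholds above implement a simple "much better" test: a mode is
# flagged only when the alternative is at least 7% slower, e.g. fcpu = 1.10 s
# vs fgpu_only = 1.00 s gives 1.10/1.00 = 1.10 > 1.07, so
# gpu_only_is_much_better_cpu is set to True.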

########################################################
ff=getattr(sys.modules[__name__], 'filter_data')

ii={'action':'filter',
    'module_uoa':'experiment',
    'out':'con',
    'filter_func_addr':ff}

r=ck.load_json_file({'json_file':'filter-add-characteristic-gpu-copy-is-much-better-cpu.py'})
if r['return']>0: 
   ck.out('Error:'+r['error'])
   exit(1)

ii.update(r['dict'])

r=ck.access(ii)
if r['return']>0: 
   ck.out('Error:'+r['error'])
   exit(1)

exit(0)
Example #18
def push(i):

    """
    Input:  {
              uid [str] - graph identifier
              (version) [str] - graph version
              (filename) [str] - JSON file with results
              (json) [str] - JSON string from command line (use ' instead of ")
              (point) [str] - specific point name to add/update
            }

    Output: {
              return  [int]    - return code = 0 if success or >0 if error
              (error) [str]    - error string if return>0 

              dict    [dict]   - configuration dictionary
              path    [str]    - path to CK cfg entry
            }
    """

    # CID ###########################################################        
    uid=i['uid']
    if uid=='':
       return {'return':1, 'error':'graph UID is not defined!'}

    version=i.get('version')
    if version==None: version=''

    filename=i.get('filename','')
    json_string=i.get('json','')

    if filename=='' and json_string=='':
       return {'return':1, 'error':'either "filename" or "json" should define results to be pushed'}

    point=i.get('point','')

    # Prepare data
    data=[]

    if filename!='':
       r=ck.load_json_file({'json_file':filename})
       if r['return']>0: return r

       data2=r['dict']
       if type(data2)==dict:
          data2=[data2]

       data+=data2

    if json_string!='':
       import json

       json_string=json_string.replace("'", '"')

       data2=json.loads(json_string)

       if type(data2)==dict:
          data2=[data2]

       data+=data2

    # Send request
    r=config.load({})
    if r['return']>0: return r
    cfg=r['dict']

    # Check if username and API_Key are empty and then use default crowd-user ...
    username=cfg.get('username','')
    if username=='' or username==None:
       cfg['username']=config.CR_DEFAULT_SERVER_USER
       cfg['api_key']=config.CR_DEFAULT_SERVER_API_KEY

    # Sending request to download
    r=comm.send({'config':cfg,
                 'action':'push_result',
                 'dict':{
                   'data_uoa':uid,
                   'version':version,
                   'point':point,
                   'data':data
                 }
                })
    if r['return']>0: return r
    url=r.get('url','')

    ck.out('  Successfully pushed your point to a graph!')
    if url!='':
       ck.out('    URL: '+url)

    return r
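
# Hypothetical usage of push() above (input keys from its docstring;
# identifiers and file names are illustrative):
#   r = push({'uid': '<graph-uid>', 'filename': 'results.json'})
#   r = push({'uid': '<graph-uid>', 'json': "[{'x': 1, 'y': 2}]"})  # ' instead of "
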
def do(i):

    top1 = {}
    top5 = {}

    # List accuracy entries
    r = ck.access({
        'action': 'search',
        'module_uoa': 'experiment',
        'data_uoa': 'ck-request-asplos18-caffe-intel-performance-*',
        'repo_uoa': 'local',
        'add_meta': 'yes'
    })
    if r['return'] > 0: return r
    lst = r['lst']

    for q in lst:
        duid = q['data_uid']
        duoa = q['data_uoa']

        path = q['path']

        model = ''
        model_species = ''
        if 'inception-v3' in duoa:
            model = 'inception-v3'
            model_species = '1b339ddb13408f8f'
        elif 'resnet50' in duoa:
            model = 'resnet50'
            model_species = 'd777f6335496db61'

        if model == '':
            return {'return': 1, 'error': 'model is not recognized'}

        prec = ''
        if '-fp32' in duoa:
            prec = 'fp32'
        elif '-int8' in duoa:
            prec = 'int8'

        if prec == '':
            return {'return': 1, 'error': 'model precision is not recognized'}

        ck.out('* ' + duoa + ' / ' + model + ' / ' + prec)

        # Search matching accuracy entry (if intel-request)
        x = 'ck-request-asplos18-caffe-intel-accuracy.*.' + model + '-' + prec
        r = ck.access({
            'action': 'search',
            'module_uoa': 'experiment',
            'data_uoa': x,
            'repo_uoa': 'local'
        })
        if r['return'] > 0: return r
        alst = r['lst']
        if len(alst) != 1:
            return {'return': 1, 'error': 'ambiguity when searching for accuracy entries'}

        a = alst[0]
        apath = a['path']

        # There is only one point normally (no model tuning)
        dacc = {}
        xacc = os.listdir(apath)

        for f in xacc:
            if f.endswith('.flat.json'):
                r = ck.load_json_file({'json_file': os.path.join(apath, f)})
                if r['return'] > 0: return r

                dx = r['dict']

                # Get only accuracy keys (convert to common format)
                for k in dx:
                    if k.startswith('##characteristics#run#acc/top-'):
                        k1 = '##characteristics#run#accuracy_top' + k[30:]
                        dacc[k1] = dx[k]
                    elif k.startswith('##characteristics#run#accuracy/top-'):
                        k1 = '##characteristics#run#accuracy_top' + k[35:]
                        dacc[k1] = dx[k]
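                # Example of the key normalization above:
                #   '##characteristics#run#acc/top-1'      -> '##characteristics#run#accuracy_top1'
                #   '##characteristics#run#accuracy/top-5' -> '##characteristics#run#accuracy_top5'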

                break

        if len(dacc) == 0:
            return {
                'return': 1,
                'error': 'strange - no match for accuracy entries'
            }

        # Iterating over points to aggregate
        dperf = os.listdir(path)
        for f in dperf:
            if f.endswith('.flat.json'):
                ck.out(' * ' + f)

                # Load performance file
                p1 = os.path.join(path, f)

                r = ck.load_json_file({'json_file': p1})
                if r['return'] > 0: return r
                d = r['dict']

                # Merge accuracy
                for k in dacc:
                    d[k] = dacc[k]

                # Save updated dict
                r = ck.save_json_to_file({
                    'json_file': p1,
                    'dict': d,
                    'sort_keys': 'yes'
                })
                if r['return'] > 0: return r

    return {'return': 0}
Example #20
def init(i):

    """
    Input:  {
              uid [str] - graph identifier
              (version) [str] - graph version
              (desc_file) [str] - file with graph description
              (tags) [str] - tags separated by comma
            }

    Output: {
              return  [int]    - return code = 0 if success or >0 if error
              (error) [str]    - error string if return>0 

              dict    [dict]   - configuration dictionary
              path    [str]    - path to CK cfg entry
            }
    """

    # Get main configuration
    r=config.load({})
    if r['return']>0: return r
    cfg=r.get('dict',{})
    pcfg=r.get('path','')

    # CID ###########################################################        
    uid=i['uid']
    if uid==None: uid=''

    version=i.get('version')
    if version==None: version=''

    desc_file=i.get('desc_file','')
    if desc_file==None: desc_file=''

    # If UID!='', check if already exists ...
    found=False
    meta=meta_template
    path=''
    data_name=''
    tags=[]
    meta_info=''
    source=''
    extra_info={}

    if uid!='':
       r=ck.access({'action':'load',
                    'module_uoa':'result',
                    'data_uoa':uid})
       if r['return']>0:
          if r['return']!=16: return r
       else:
          found=True
          meta=r['dict']
          path=r['path']
          data_name=r['data_name']

          tags=meta.get('tags',[])
          source=meta.get('source','')
          meta_info=meta.get('meta',{}).get('info','')

          extra_info=r['info'].get('control',{})

    # Check if init from scratch and no title
    if i.get('name')!=None and i.get('name','')!='':
       data_name=i['name'].strip()
    elif not found or data_name=='':
       r=ck.inp({'text':'Select a title for your graph: '})
       if r['return']>0: return r

       data_name=r['string'].strip()

       meta['meta']['title']=data_name

    # Check if init from scratch and no info
    if not found or meta_info=='':
       r=ck.inp({'text':'Enter general info about your graph: '})
       if r['return']>0: return r

       x=r['string'].strip()

       if x=='': x=' '

       meta['meta']['info']=x

    # Adding tags
    if i.get('tags')!=None and i.get('tags','')!='':
       xtags=i['tags'].strip().split(',')

       for t in xtags:
           t1=t.strip()
           if t1!='' and t1 not in tags:
              tags.append(t1)

       meta['tags']=tags

    elif not found or (len(tags)==1 and 'result' in tags):
       r=ck.inp({'text':'Enter tags for your graph separated by commas: '})
       if r['return']>0: return r

       xtags=r['string'].strip().split(',')

       for t in xtags:
           t1=t.strip()
           if t1!='' and t1 not in tags:
              tags.append(t1)

       meta['tags']=tags

    # Checking source
    if not found or source=='':
       r=ck.inp({'text':'Enter source of results for your graph (can be URL): '})
       if r['return']>0: return r

       source=r['string'].strip()

       meta['source']=source

    # Checking authors
    for x in extra_info_desc:
        k=x['key']
        n=x['name']

        if not found or extra_info.get(k,'')=='':
           r=ck.inp({'text':'Enter '+n+': '})
           if r['return']>0: return r

           s=r['string'].strip()

           extra_info[k]=s

    # Creating/updating graph
    a='add'
    if found: a='update'

    ii={'action':a,
        'module_uoa':'result',
        'data_uoa':uid,
        'dict':meta,
        'sort_keys':'yes',
        'data_name':data_name,
        'substitute':'yes',
        'extra_info':extra_info}

    r=ck.access(ii)
    if r['return']>0: return r

    data_uoa=r['data_uoa']
    data_uid=r['data_uid']
    path=r['path']

    x='initialized'
    if found: x='updated'

    ck.out('Graph was successfully '+x+':')
    ck.out('')
    ck.out('  CK UID:  '+data_uid)
    ck.out('  CK name: '+data_uoa)
    ck.out('  CK path: '+path)

    # Add desc
    p1=os.path.join(path, 'desc.json')

    dt=copy.deepcopy(desc_template)
    if desc_file!='':
       rx=ck.load_json_file({'json_file':desc_file})
       if rx['return']>0: return rx
       dx=rx['dict']
       dt['data_config'].update(dx)

    if desc_file!='' or not os.path.isfile(p1):
       rx=ck.save_json_to_file({'json_file':p1, 'dict':dt, 'sort_keys':'yes'})
       if rx['return']>0: return rx

    p2=os.path.join(path, '.cm', 'meta.json')

    ck.out('')
    ck.out('You can continue updating graph using following files: ')
    ck.out('')
    ck.out('  Graph general meta info: '+p1)
    ck.out('     See example at '+config.CR_DEFAULT_SERVER+'/result/sota-mlperf-inference-results-v0.5-open-available/?action=download&filename=.cm/meta.json')
    ck.out('')
    ck.out('  Graph axes info: '+p2)
    ck.out('     See example at '+config.CR_DEFAULT_SERVER+'/result/sota-mlperf-inference-results-v0.5-open-available/?action=download&filename=desc.json')

    # Need to publish
    ck.out('')
    rx=ck.inp({'text':'Publish graph on the portal (Y/n)?'})
    if rx['return']>0: return rx
    s=rx['string'].strip().lower()

    if s=='' or s=='y':
       ck.out('')
       r=obj.publish({'cid':'result:'+data_uoa,
                      'version':version,
                      'force':True})

    else:
       ck.out('')
       ck.out('You can publish your graph on the portal using the following commands when ready: ')
       ck.out('')
       ck.out('  cb publish result:'+data_uoa+' --version=1.0.0 --force (--private)')

    return r
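
# Hypothetical usage of init() above (input keys from its docstring; missing
# fields are requested interactively):
#   r = init({'uid': '', 'tags': 'performance,autotuning', 'desc_file': ''})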
Example #21
def main(i):

    # Load common table file (for all models)
    ddd = {}
    r = ck.load_json_file({'json_file': 'save_all_model_data_tmp.json'})
    if r['return'] == 0:
        ddd = r['dict']

    # Searching for features
    for g in gcc:
        ck.out(
            '********************************************************************************'
        )
        ck.out('Reducing model complexity for ' + g)
        ck.out('')

        if g not in ddd: ddd[g] = {}

        gx = g.replace(' ', '_')

        r = ck.load_json_file(
            {'json_file': 'prepare_train_data_tmp.' + gx + '.json'})
        if r['return'] > 0: ck.err(r)

        d = r['dict']

        ftable = d['ftable']
        ctable = d['ctable']

        # Normalize (all features 0..1)
        ftable_range = {}
        for f in ftable:
            for k in range(0, 121):
                v = f[k]
                if k not in ftable_range:
                    ftable_range[k] = {'min': None, 'max': None}
                if ftable_range[k]['min'] == None or v < ftable_range[k]['min']:
                    ftable_range[k]['min'] = v
                if ftable_range[k]['max'] == None or v > ftable_range[k]['max']:
                    ftable_range[k]['max'] = v

        ftable_normalized = []
        for f in ftable:
            x = []
            for k in range(0, 121):
                v = 0
                if ftable_range[k]['max'] != 0:
                    v = f[k] / ftable_range[k]['max']
                x.append(v)
            ftable_normalized.append(x)
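        # e.g. if the maximum of feature k over ftable is 200, a raw value of 50
        # maps to 0.25; features whose maximum is 0 stay 0, avoiding a division
        # by zero (the collected 'min' values are not used in this scaling)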

        features_mask = []
        for f in range(0, 121):
            features_mask.append(0)

        r = model({
            'ftable': ftable_normalized,
            'features_mask': features_mask,
            'ctable': ctable
        })
        if r['return'] > 0: return r

        r1 = ck.save_json_to_file({
            'json_file':
            'process_model_using_nearest_neighbour_assemble_features/prepare_reactions_model_train_ref_result.'
            + gx + '.json',
            'dict':
            r
        })
        if r1['return'] > 0: ck.err(r1)

        ref_acc = r['accuracy']  # Reference accuracy

        x = 'Reference accuracy: ' + str(ref_acc)
        s = x + '\n\n'

        ck.out('---------------------')
        ck.out(x)
        ck.out('')

        # Assembling features (adding features one by one) 0:121 - normalized features!
        for k in range(0, 121):
            features_mask[k] = 1

            r = model({
                'ftable': ftable_normalized,
                'features_mask': features_mask,
                'ctable': ctable,
                'skip_out': 'yes'
            })
            if r['return'] > 0: return r

            acc = r['accuracy']

            remove = False
            sx = ''
            if acc < ref_acc:
                remove = True
                sx = 'removed'
            elif acc == ref_acc:
                sx = 'kept'
            elif acc > ref_acc:
                ref_acc = acc
                sx = 'kept (accuracy even improved)'

            if remove:
                features_mask[k] = 0

            x = 'ft' + str(k + 1) + ') ' + str(
                acc) + ' ' + sx + ' (ref acc=' + str(ref_acc) + ')'
            ck.out(x)
            s += x + '\n'

        # Final accuracy
        r = model({
            'ftable': ftable_normalized,
            'features_mask': features_mask,
            'ctable': ctable,
            'skip_out': 'yes'
        })
        if r['return'] > 0: return r

        acc = r['accuracy']

        r1 = ck.save_json_to_file({
            'json_file':
            'process_model_using_nearest_neighbour_assemble_features/prepare_reactions_model_train_reduced_result.'
            + gx + '.json',
            'dict':
            r
        })
        if r1['return'] > 0: ck.err(r1)

        # Final result
        ck.out('')
        ck.out('Final features mask:')
        ck.out('')

        s += '\nFinal features mask:\n\n'
        s1 = ''

        for f in range(0, len(features_mask)):
            x = '  ft' + str(f + 1) + ') ' + str(features_mask[f])
            ck.out(x)
            s += x + '\n'

            if features_mask[f] == 1:
                if s1 != '': s1 += ','
                s1 += 'ft' + str(f + 1)

        s += '\nFinal accuracy: ' + str(acc) + '\n'

        r = ck.save_text_file({
            'text_file':
            'process_model_using_nearest_neighbour_assemble_features/log.' +
            gx + '.txt',
            'string':
            s
        })
        if r['return'] > 0: return r

        r = ck.save_text_file({
            'text_file':
            'process_model_using_nearest_neighbour_assemble_features/influential_features.'
            + gx + '.txt',
            'string':
            s1
        })
        if r['return'] > 0: return r

        ddd[g]['milepost_reduce_complexity1_normalized_ft1_ft121'] = acc

    # Save common data
    r = ck.save_json_to_file({
        'json_file': 'save_all_model_data_tmp.json',
        'dict': ddd
    })
    if r['return'] > 0: return r

    return {'return': 0}
Example #22
def main(i):

    # Load common table file (for all models)
    ddd = {}
    r = ck.load_json_file({'json_file': 'save_all_model_data_tmp.json'})
    if r['return'] == 0:
        ddd = r['dict']

    # Searching for features
    for g in gcc:
        ck.out(
            '********************************************************************************'
        )
        ck.out('Modeling optimizations for ' + g)
        ck.out('')

        if g not in ddd: ddd[g] = {}

        gx = g.replace(' ', '_')

        r = ck.load_json_file(
            {'json_file': 'prepare_train_data_tmp.' + gx + '.json'})
        if r['return'] > 0: ck.err(r)

        d = r['dict']

        ftable = d['ftable']
        ctable = d['ctable']

        # Normalize (all features 0..1)
        ftable_range = {}
        for f in ftable:
            for k in range(0, 121):
                v = f[k]
                if k not in ftable_range:
                    ftable_range[k] = {'min': None, 'max': None}
                if ftable_range[k]['min'] == None or v < ftable_range[k]['min']:
                    ftable_range[k]['min'] = v
                if ftable_range[k]['max'] == None or v > ftable_range[k]['max']:
                    ftable_range[k]['max'] = v

        ftable_normalized = []
        for f in ftable:
            x = []
            for k in range(0, 121):
                v = 0
                if ftable_range[k]['max'] != 0:
                    v = f[k] / ftable_range[k]['max']
                x.append(v)
            ftable_normalized.append(x)

        # Feature subsets (0-based [start, stop) index ranges):
        # ft1..ft56, ft1..ft65, ft1..ft121, ft57..ft65, ft57..ft121, ft66..ft121
        for ft in [[0, 56], [0, 65], [0, 121], [56, 65], [56, 121], [65, 121]]:
            ft_start = ft[0]
            ft_stop = ft[1]

            ext = 'ft' + str(ft_start + 1) + '_' + 'ft' + str(ft_stop)
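            # e.g. ft = [56, 65] yields ext = 'ft57_ft65' (1-based, inclusive)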

            ck.out('')
            ck.out('Using non-normalized features ' + ext + ' ...')

            r = model({
                'ftable': ftable,
                'ctable': ctable,
                'ft_start': ft_start,
                'ft_stop': ft_stop
            })
            if r['return'] > 0: return r

            ddd[g]['milepost_' + ext] = r['accuracy']

            r1 = ck.save_json_to_file({
                'json_file':
                'process_model_using_nearest_neighbour/process_model_using_nearest_neighbour_'
                + ext + '_tmp.' + gx + '.json',
                'dict':
                r
            })
            if r1['return'] > 0: ck.err(r1)

            # Normalized ft1..ft56
            ck.out('')
            ck.out('Using normalized features ' + ext + ' ...')

            r = model({
                'ftable': ftable_normalized,
                'ctable': ctable,
                'ft_start': ft_start,
                'ft_stop': ft_stop
            })
            if r['return'] > 0: return r

            r1 = ck.save_json_to_file({
                'json_file':
                'process_model_using_nearest_neighbour/process_model_using_nearest_neighbour_'
                + ext + '_tmp.' + gx + '.normalized.json',
                'dict':
                r
            })
            if r1['return'] > 0: ck.err(r1)

            ddd[g]['milepost_normalized_' + ext] = r['accuracy']

    # Save common data
    r = ck.save_json_to_file({
        'json_file': 'save_all_model_data_tmp.json',
        'dict': ddd
    })
    if r['return'] > 0: return r

    return {'return': 0}
def do(i):

    # List performance entries
    r = ck.access({
        'action': 'search',
        'module_uoa': 'experiment',
        'repo_uoa': 'local',
        #                 'repo_uoa':'ck-request-asplos18-results'})
        'data_uoa': '*ck-request-asplos18-caffe-intel-performance-*'
    })
    if r['return'] > 0: return r
    lst = r['lst']

    for q in lst:
        duid = q['data_uid']
        duoa = q['data_uoa']
        ruid = q['repo_uid']
        path = q['path']

        ck.out(duoa)

        # Load entry
        r = ck.access({
            'action': 'load',
            'module_uoa': 'experiment',
            'data_uoa': duid,
            'repo_uoa': ruid
        })
        if r['return'] > 0: return r

        dd = r['dict']
        ruid = r['repo_uid']
        apath = r['path']

        # Check extra info
        model = ''
        model_species = ''
        model_size = 0
        if 'inception-v3' in duoa:
            model = 'inception-v3'
            model_species = '1b339ddb13408f8f'
            model_size = 95533753
        elif 'resnet50' in duoa:
            model = 'resnet50'
            model_species = 'd777f6335496db61'
            model_size = 102462397

        if model == '':
            return {'return': 1, 'error': 'model is not recognized'}

        prec = ''
        if '-fp32' in duoa:
            prec = 'fp32'
        elif '-int8' in duoa:
            prec = 'int8'
            model_size = model_size / 4  # Guess

        if prec == '':
            return {'return': 1, 'error': 'model precision is not recognized'}

        # Updating meta if needed
        dd['meta']['scenario_module_uoa'] = 'a555738be4b65860'  # module:request.asplos18

        dd['meta']['model_species'] = model_species  # model.species:mobilenets

        dd['meta']['dataset_species'] = 'ImageNet'  # dataset species (free format)
        dd['meta']['dataset_size'] = 50000  # number of images ...

        dd['meta']['platform_species'] = 'server'  # embedded vs server (maybe other classifications such as edge)

        # Unified full name for some deps
        ds = dd['meta']['deps_summary']

        x = ds['caffemodel']
        r = ck.access({
            'action': 'make_deps_full_name',
            'module_uoa': 'request.asplos18',
            'deps': x
        })
        if r['return'] > 0: return r
        dd['meta']['model_design_name'] = r['full_name']

        x = ds['lib-caffe']
        r = ck.access({
            'action': 'make_deps_full_name',
            'module_uoa': 'request.asplos18',
            'deps': x
        })
        if r['return'] > 0: return r
        dd['meta']['library_name'] = r['full_name']

        x = x['deps']['compiler']
        r = ck.access({
            'action': 'make_deps_full_name',
            'module_uoa': 'request.asplos18',
            'deps': x
        })
        if r['return'] > 0: return r
        dd['meta']['compiler_name'] = r['full_name']

        used_gpgpu = False
        if ds.get('lib-caffe', {}).get('deps', {}).get(
                'compiler-cuda', {}).get('data_name', '') != '':
            used_gpgpu = True

        if used_gpgpu:
            # GPU used
            dd['meta']['cpu_name'] = ''
            dd['meta']['cpu_vendor'] = ''
            dd['meta']['platform_peak_power'] = 180  # Watts
            dd['meta']['platform_price'] = 700  # $
            dd['meta']['platform_price_date'] = '20180101'  # date

        else:
            # CPU used
            dd['meta']['gpgpu_name'] = ''
            dd['meta']['gpgpu_vendor'] = ''
            dd['meta']['platform_peak_power'] = 105  # Watts
            dd['meta']['platform_price'] = 1166  # $
            dd['meta']['platform_price_date'] = '20141212'  # date

        dd['meta']['artifact'] = 'e7cc77d72f13441e'  # artifact description

        dd['meta']['model_precision'] = prec

        dd['meta']['processed'] = 'yes'

        # Updating entry
        r = ck.access({
            'action': 'update',
            'module_uoa': 'experiment',
            'data_uoa': duid,
            'repo_uoa': ruid,
            'dict': dd,
            'substitute': 'yes',
            'ignore_update': 'yes',
            'sort_keys': 'yes'
        })
        if r['return'] > 0: return r

        # Checking points to aggregate
        os.chdir(path)
        dperf = os.listdir(path)
        for f in dperf:
            if f.endswith('.cache.json'):
                os.system('git rm -f ' + f)

            elif f.endswith('.flat.json'):
                ck.out(' * ' + f)

                # Load performance file
                p1 = os.path.join(path, f)

                r = ck.load_json_file({'json_file': p1})
                if r['return'] > 0: return r
                d = r['dict']

                d['##features#processed#min'] = 'yes'

                # Clean up keys
                d1 = {}
                for k in d:
                    v = d[k]
                    if not k.startswith('##characteristics#run#inference_latency') and \
                       not k.startswith('##characteristics#run#prediction_time_avg_s') and \
                       not k.startswith('##characteristics#run#inference_throughput') and \
                       not k.startswith('##characteristics#run#usage_cost'):
                        d1[k] = v
                d = d1

                # Unify execution time + batch size
                x = d.get('##characteristics#run#REAL_ENV_CK_CAFFE_BATCH_SIZE#min', '')
                if x is not None and x != '':
                    batch = int(x)
                    d['##features#batch_size#min'] = batch

                    tall = d.get('##characteristics#run#time_fw_s#all', [])

                    if batch == 1:
                        # inference latency
                        d['##features#measuring_latency#min'] = 'yes'

                        r = ck.access({
                            'action': 'stat_analysis',
                            'module_uoa': 'experiment',
                            'dict': d,
                            'dict1': {
                                '##characteristics#run#inference_latency': tall
                            }
                        })
                        if r['return'] > 0: return r

                    tnew = []
                    cnew = []
                    for t in tall:
                        t1 = t / batch  # per-image time
                        tnew.append(t1)

                        # 'cost' ($ per hour of the platform) is assumed to be
                        # defined at module level; it is not shown in this snippet.
                        c1 = t1 * cost / (60 * 60)
                        if c1 != 0:
                            cnew.append(c1)

                    r = ck.access({
                        'action': 'stat_analysis',
                        'module_uoa': 'experiment',
                        'dict': d,
                        'dict1': {
                            '##characteristics#run#prediction_time_avg_s': tnew
                        }
                    })
                    if r['return'] > 0: return r

                    if len(cnew) > 0:
                        r = ck.access({
                            'action': 'stat_analysis',
                            'module_uoa': 'experiment',
                            'dict': d,
                            'dict1': {
                                '##characteristics#run#usage_cost': cnew
                            }
                        })
                        if r['return'] > 0: return r

                        d['##characteristics#run#usage_cost_per_hour#min'] = cost
                        d['##characteristics#run#usage_cost_date'] = '20180403'

                    # Throughput for all batches
                    if len(tnew) > 0:
                        tall = tnew  # per-image times from the previous step

                        tnew = []
                        for t in tall:
                            tnew.append(1.0 / t)  # images per second

                        r = ck.access({
                            'action': 'stat_analysis',
                            'module_uoa': 'experiment',
                            'dict': d,
                            'dict1': {
                                '##characteristics#run#inference_throughput':
                                tnew
                            }
                        })
                        if r['return'] > 0: return r

                d['##features#model_size#min'] = model_size

                if not used_gpgpu:
                    d['##features#cpu_freq#min'] = 2000  # MHz
                    d['##features#freq#min'] = d['##features#cpu_freq#min']
                else:
                    d['##features#gpu_freq#min'] = 1600  # MHz
                    d['##features#freq#min'] = d['##features#gpu_freq#min']

                # Save updated dict
                r = ck.save_json_to_file({
                    'json_file': p1,
                    'dict': d,
                    'sort_keys': 'yes'
                })
                if r['return'] > 0: return r

    return {'return': 0}
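Every ck.access call above follows CK's uniform convention: it returns a dictionary whose integer 'return' code is 0 on success, with an 'error' string otherwise. Note that do() also reads a module-level variable cost (the platform price in $ per hour, used for the usage_cost characteristics) that is not defined in this snippet. A minimal driver sketch, assuming the snippet's own ck/os imports and a purely hypothetical hourly price:

import ck.kernel as ck

# Assumption: hourly platform price read by do(); this value is hypothetical.
cost = 3.06  # $ per hour

if __name__ == '__main__':
    r = do({})
    if r['return'] > 0:
        ck.err(r)  # prints the error and exits with a non-zero code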
Example #24
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, [email protected], http://fursin.net, 2017
#

import ck.kernel as ck
import os
import json

extract_missing_features = False

gcc = ['GCC 4.9.2', 'GCC 7.1.0']

# Load info file
r = ck.load_json_file({'json_file': 'init_reactions_tmp_info.json'})
if r['return'] > 0: ck.err(r)
info = r['dict']

all_classes = info['classes']

# Load MILEPOST feature descriptions
r = ck.access({
    'action': 'load',
    'module_uoa': 'module',
    'data_uoa': 'program.static.features'
})
if r['return'] > 0: ck.err(r)
features = r['dict']['milepost_features_description']
milepost_normalization_feature = r['dict']['milepost_normalization_feature']
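As a quick sanity check, the loaded feature descriptions can be printed. This sketch only assumes that milepost_features_description behaves as a dictionary mapping feature IDs to description records, as implied by the load above:

# Sketch: list the MILEPOST feature descriptions loaded above.
for fid in sorted(features):
    ck.out(str(fid) + ': ' + str(features[fid]))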
Example #25
def do(i):

    # List performance entries
    r=ck.access({'action':'search',
                 'module_uoa':'experiment',
                 'data_uoa':'ck-request-asplos18-mobilenets-tvm-arm-performance-*',
                 'repo_uoa':'local'})
    if r['return']>0: return r
    lst=r['lst']

    for q in lst:
        duid=q['data_uid']
        duoa=q['data_uoa']
        path=q['path']

        ck.out(duoa)

        # Search matching accuracy entry
        aduoa=duoa.replace('-performance-','-accuracy-')

        r=ck.access({'action':'find',
                     'module_uoa':'experiment',
                     'data_uoa':aduoa,
                     'repo_uoa':'local'})
        if r['return']>0: return r
        apath=r['path']             

        # Checking points to aggregate
        dperf=os.listdir(path)
        for f in dperf:
            if f.endswith('.flat.json'):
               ck.out(' * '+f)

               # Load performance file 
               p1=os.path.join(path, f)

               r=ck.load_json_file({'json_file':p1})
               if r['return']>0: return r
               d=r['dict']

               p2=os.path.join(path, f[:-10]+'.features_flat.json') # Features

               r=ck.load_json_file({'json_file':p2})
               if r['return']>0: return r
               df=r['dict']

               # Remove batch
               del df['##choices#env#CK_BATCH_COUNT']
               
               # Find matching features file to merge
               dacc=os.listdir(apath)
               matched=False
               for af in dacc:
                   if af.endswith('.features_flat.json'):
                      r=ck.load_json_file({'json_file':os.path.join(apath,af)})
                      if r['return']>0: return r
                      adf=r['dict']

                      # Remove batch
                      del adf['##choices#env#CK_BATCH_COUNT']

                      # Compare dicts
                      r=ck.compare_dicts({'dict1':df, 'dict2':adf})
                      if r['return']>0: return r
                      if r['equal']=='yes':
                         matched=True

                         # Load accuracy data to merge
                         px=os.path.join(apath,af[:-19]+'.flat.json')
                         r=ck.load_json_file({'json_file':px})
                         if r['return']>0: return r
                         dd=r['dict']

                         # Merge keys
                         for k in dd:
                             if k.startswith('##characteristics#run#accuracy_top'):
                                d[k]=dd[k]

                         break
               
               if not matched:
                  return {'return':1, 'error':'no matching accuracy entry found for '+f}

               # Save updated dict
               r=ck.save_json_to_file({'json_file':p1, 'dict':d, 'sort_keys':'yes'})
               if r['return']>0: return r

    return {'return':0}
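The matching step above hinges on ck.compare_dicts, which compares two dictionaries regardless of key order and reports the result in the 'equal' field using CK's usual return-dict convention. A minimal standalone illustration:

import ck.kernel as ck

a = {'x': 1, 'y': 2}
b = {'y': 2, 'x': 1}

r = ck.compare_dicts({'dict1': a, 'dict2': b})
if r['return'] > 0:
    ck.err(r)

ck.out(r['equal'])  # 'yes' - key order does not matter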
Example #26
def do(i):

    # List performance entries
    r=ck.access({'action':'search',
                 'module_uoa':'experiment',
                 'data_uoa':'mobilenets-performance-*'
#                 'repo_uoa':'ck-request-asplos18-results'
                })
    if r['return']>0: return r
    lst=r['lst']

    for q in lst:
        duid=q['data_uid']
        duoa=q['data_uoa']
        ruid=q['repo_uid']
        path=q['path']

        ck.out(duoa)

        # Load entry
        r=ck.access({'action':'load',
                     'module_uoa':'experiment',
                     'data_uoa':duid,
                     'repo_uoa':ruid})
        if r['return']>0: return r

        dd=r['dict']
        ruid=r['repo_uid']
        apath=r['path']             

        # Updating meta if needed
        dd['meta']['scenario_module_uoa']='a555738be4b65860' # module:request.asplos18

        dd['meta']['model_species']='07d4e7aa3750ddc6' # model.species:mobilenets

        dd['meta']['dataset_species']='ImageNet' # dataset species (free format)
        dd['meta']['dataset_size']=500 # number of images ...

        dd['meta']['platform_species']='embedded' # embedded vs server (maybe other classifications such as edge)

        dd['meta']['platform_peak_power']=4.5 #Watts
        dd['meta']['platform_price']=239 # $
        dd['meta']['platform_price_date']='20170425' # date

        dd['meta']['artifact']='08da9685582866a0' # artifact description

        dd['meta']['model_precision']='fp32'

        dd['meta']['processed']='yes'

        # Unified full name for some deps
        ds=dd['meta']['deps_summary']

        x=ds['weights']
        r=ck.access({'action':'make_deps_full_name','module_uoa':'request.asplos18','deps':x})
        if r['return']>0: return r
        dd['meta']['model_design_name']=r['full_name']

        x=ds['compiler']
        r=ck.access({'action':'make_deps_full_name','module_uoa':'request.asplos18','deps':x})
        if r['return']>0: return r
        dd['meta']['compiler_name']=r['full_name']

        x=ds['library']
        r=ck.access({'action':'make_deps_full_name','module_uoa':'request.asplos18','deps':x})
        if r['return']>0: return r
        dd['meta']['library_name']=r['full_name']

        # Updating entry
        r=ck.access({'action':'update',
                     'module_uoa':'experiment',
                     'data_uoa':duid,
                     'repo_uoa':ruid,
                     'dict':dd,
                     'substitute':'yes',
                     'ignore_update':'yes',
                     'sort_keys':'yes'
                    })
        if r['return']>0: return r

        # Checking points to aggregate
        os.chdir(path)
        dperf=os.listdir(path)
        for f in dperf:
            if f.endswith('.cache.json'):
               os.system('git rm -f '+f)

            elif f.endswith('.flat.json'):
               ck.out(' * '+f)

               # Load performance file 
               p1=os.path.join(path, f)

               r=ck.load_json_file({'json_file':p1})
               if r['return']>0: return r
               d=r['dict']

               mult=d.get('##choices#env#CK_ENV_MOBILENET_WIDTH_MULTIPLIER#min','')

               if mult==0.25: size=1990786
               elif mult==0.5: size=5459810
               elif mult==0.75: size=10498594
               elif mult==1.0: size=17106694
               else:
                  return {'return':1, 'error':'unknown width multiplier "'+str(mult)+'"'}

               d['##features#model_size#min']=size

               d['##features#gpu_freq#min']=807
               d['##features#cpu_freq#min']=''
               d['##features#freq#min']=d['##features#gpu_freq#min']

               d['##features#processed#min']='yes'

               # Add throughput (images/second)
               tall=d.get('##characteristics#run#prediction_time_avg_s#all',[])
               if len(tall)>0:
                  tnew=[]
                  for t in tall:
                       t1=1.0/t # images per second
                      tnew.append(t1)
                  
                  r=ck.access({'action':'stat_analysis',
                               'module_uoa':'experiment',
                               'dict':d,
                               'dict1':{'##characteristics#run#inference_throughput':tnew}
                              })
                  if r['return']>0: return r

               # Unify batch size
               x=d.get('##choices#env#CK_BATCH_SIZE#min','')
               if x!=None and x!='':
                  batch=int(x)
                  d['##features#batch_size#min']=batch

                  if batch==1:
                     # inference latency
                     d['##features#measuring_latency#min']='yes'

                     r=ck.access({'action':'stat_analysis',
                                  'module_uoa':'experiment',
                                  'dict':d,
                                  'dict1':{'##characteristics#run#inference_latency':tall}
                                 })
                     if r['return']>0: return r

               # Save updated dict
               r=ck.save_json_to_file({'json_file':p1, 'dict':d, 'sort_keys':'yes'})
               if r['return']>0: return r

    return {'return':0}
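Both functions above delegate aggregation to the stat_analysis action of the experiment module: as used here, it takes a list of raw observations per characteristic in 'dict1' and writes derived statistical keys into 'dict'. A hedged standalone sketch of that call pattern (the exact set of derived keys depends on the experiment module version):

import ck.kernel as ck

d = {}  # flat dict that receives the derived keys
times = [0.101, 0.098, 0.105]  # hypothetical per-image latencies in seconds

r = ck.access({'action': 'stat_analysis',
               'module_uoa': 'experiment',
               'dict': d,
               'dict1': {'##characteristics#run#inference_latency': times}})
if r['return'] > 0:
    ck.err(r)

# d now holds keys derived from the raw list, e.g.
# '##characteristics#run#inference_latency#min'.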
Example #27
import ck.kernel as ck

r = ck.access('list #milepost* out=none')
if r['return'] > 0:
    print('Error: ' + r['error'])
    exit(1)

lst = r['lst']

r = ck.load_json_file({'json_file': 'convert2.json'})
if r['return'] > 0:
    print('Error: ' + r['error'])
    exit(1)
d1 = r['dict']

dtags = {}

for q in lst:
    m = q['module_uoa']
    d = q['data_uoa']

    print('***********************************')
    print(d)

    r = ck.access({'action': 'load', 'module_uoa': m, 'data_uoa': d})
    if r['return'] > 0:
        print('Error: ' + r['error'])
        exit(1)

    dd = r['dict']
    dm = r['info']
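The snippet is truncated at this point. In the other examples on this page, a loaded entry is eventually written back with the 'update' action; a hedged sketch of how this conversion loop might continue, mirroring that pattern:

    # Hedged continuation sketch: write the (possibly modified) meta back,
    # following the 'update' calls used by the other examples on this page.
    r = ck.access({'action': 'update',
                   'module_uoa': m,
                   'data_uoa': d,
                   'dict': dd,
                   'substitute': 'yes',
                   'sort_keys': 'yes'})
    if r['return'] > 0:
        print('Error: ' + r['error'])
        exit(1)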
Example #28
File: config.py  Project: jmrtin72/cbench
def update(i):
    """
    Input:  {
              (force) [bool] - if True, force update
            }

    Output: {
              return  [int]    - return code = 0 if success or >0 if error
              (error) [str]    - error string if return>0 
            }
    """

    import os

    global bootstrapping
    bootstrapping = True

    force = i.get('force')
    cfg = i.get('cfg', {})

    from . import obj

    title = 'Bootstrapping'
    if cfg.get('bootstrapped', '') == 'yes': title = 'Updating'

    ck.out(title + ' cBench to support portable actions and workflows:')
    ck.out('')

    # Check release notes
    server_url = cfg.get('server_url', '')
    if server_url == '': server_url = 'https://cKnowledge.io/api/v1/?'

    from . import comm_min
    r = comm_min.send({
        'url': server_url,
        'action': 'event',
        'dict': {
            'type': 'get-cbench-bootstrap-notes'
        }
    })

    notes = r.get('notes', '')
    if notes != '':
        ck.out('***********************************************')
        ck.out(notes)
        ck.out('***********************************************')

    lst_all = []

    sbf = os.environ.get('CB_SAVE_BOOTSTRAP_FILES', '')

    if sbf == '':
        fboot = 'cb-bootstrap-20200529'
        files = [fboot + '.json']

        if os.name == 'nt':
            files.append(fboot + '-win.json')

        for fn in files:
            r = ck.gen_tmp_file({'prefix': 'cb-bootstrap-', 'suffix': '.json'})
            if r['return'] > 0: return r
            ftmp = r['file_name']

            # CR_DEFAULT_SERVER is a module-level cbench constant (server base URL)
            burl = CR_DEFAULT_SERVER + '/static/bootstrap/' + fn

            ck.out('Downloading ' + burl)

            from . import comm

            rx = comm.download_file({'url': burl, 'file': ftmp})
            if rx['return'] > 0: return rx

            rx = ck.load_json_file({'json_file': ftmp})
            if rx['return'] > 0: return rx

            lst_all += rx['dict']

            os.remove(ftmp)

        r = obj.download({'components': lst_all, 'force': force})
        if r['return'] > 0 and r['return'] != 8: return r

    else:
        for x in CR_SOLUTION_CK_COMPONENTS:
            r = obj.download({
                'cid': x['cid'],
                'version': x.get('version', ''),
                'force': force
            })
            if r['return'] > 0:
                if r['return'] != 8: return r
                else: ck.out('    Skipped - already exists!')
            else:
                lst_all += r['components']

        rx = ck.save_json_to_file({
            'json_file': sbf,
            'dict': lst_all,
            'sort_keys': 'yes'
        })
        if rx['return'] > 0: return rx

    ck.out('')

    # Update cfg
    cfg['bootstrapped'] = 'yes'

    # CK_CFG_* are module-level cbench constants identifying the config entry
    ii = {
        'action': 'update',
        'repo_uoa': CK_CFG_REPO_UOA,
        'module_uoa': CK_CFG_MODULE_UID,
        'data_uoa': CK_CFG_DATA_UOA,
        'dict': cfg,
        'sort_keys': 'yes'
    }

    r = ck.access(ii)
    if r['return'] > 0: return r

    ck.out(title + ' finished!')
    ck.out('')

    return r
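A minimal invocation sketch for this updater; the import path is an assumption based on the file/project header above, and the CR_*/CK_CFG_* constants are expected to be defined elsewhere in cbench:

# Hedged usage sketch; the module path is an assumption.
from cbench import config

r = config.update({'force': True})
if r['return'] > 0:
    print('Error: ' + r.get('error', ''))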