Code example #1
def neural_residual(root_dir):
    # model selection
    net_type = load(root_dir + '/configuration.json')['net_type']
    if net_type == 'Net':
        net = nets.Net()
    elif net_type == 'Net2c':
        net = nets.Net2c()
    elif net_type == 'CNN1c':
        net = nets.CNN1c()
    elif net_type == 'CNN2c':
        net = nets.CNN2c()
    else:
        raise ValueError('invalid net type: ' + net_type)

    # get the latest model for neural network
    epoch_path = path_list(root_dir + '/models/')[-1]
    model_path = path_list(epoch_path, filter='pt')[-1]
    net.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))

    # get inputs, labels, outputs and residuals
    inputs = load(root_dir + '/test_inputs.tensor').float()
    labels = load(root_dir + '/test_labels.tensor').float().numpy()
    outputs = net(inputs).detach().cpu().clone().numpy()
    residuals = (outputs - labels) * 1000

    return residuals.T
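
The net_type dispatch above recurs in several examples on this page. A minimal helper sketch that factors it out, assuming the same nets module (build_net is a hypothetical name, not part of the original code):

import nets

def build_net(net_type):
    # map each supported net_type string to its constructor
    registry = {
        'Net': nets.Net,
        'Net2c': nets.Net2c,
        'CNN1c': nets.CNN1c,
        'CNN2c': nets.CNN2c,
    }
    if net_type not in registry:
        raise ValueError('invalid net type: %s' % net_type)
    return registry[net_type]()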
Code example #2
def run():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_root',
                        type=str,
                        required=True,
                        help='model directory for nn')
    parser.add_argument(
        '--input_type',
        type=str,
        required=True,
        help='input type among "hit", "time", "hit-time", "hit-time-2c".')
    parser.add_argument(
        '--net_type',
        type=str,
        required=True,
        help='net type among "Net", "Net2c", "CNN1c", "CNN2c".')
    args = parser.parse_args()
    model_root = args.model_root
    input_type = args.input_type
    net_type = args.net_type

    mkdir('SRC_input2output/')
    p = Pool(processes=40)
    p.starmap(
        job,
        zip(path_list(dir='SRC_json2input/', filter='.json'),
            repeat(model_root), repeat(input_type), repeat(net_type)))
    p.close()
    p.join()
Code example #3
def draw_activation(root_dir):
    # model selection
    configuration = load(root_dir + '/configuration.json')
    # configuration = load('configuration.json')
    mode = configuration['mode']
    net_type = configuration['net_type']
    if net_type == 'Net':
        net = nets.Net()
    elif net_type == 'Net2c':
        net = nets.Net2c()
    elif net_type == 'CNN1c':
        net = nets.CNN1c()
    elif net_type == 'CNN2c':
        net = nets.CNN2c()
    else:
        raise ValueError('invalid net type: ' + net_type)

    # get the latest model for neural network
    epoch_path = path_list(root_dir + '/models/')[-1]
    model_path = path_list(epoch_path, filter='pt')[-1]
    # model_path = '00300.pt'
    net.load_state_dict(
        torch.load(model_path, map_location=torch.device('cpu')))

    for module in net.modules():
        if isinstance(module, nn.Linear):
            weight = module.weight.detach().numpy()
            plt.plot(np.sum(np.abs(weight), axis=0),
                     linestyle='None',
                     marker='.')
            plt.title('%s with %s' % (mode, net_type))
            plt.xlabel('input node #')
            plt.ylabel('absolute sum of node weight')
            plt.xlim([0, 708])
            plt.ylim([0, 15])
            plt.grid()
            plt.tight_layout()
            # save before show(): some backends destroy the figure once the window closes
            plt.savefig('MC_vis_activation_' + root_dir + '.png')
            plt.show()
            plt.close()
            break
Code example #4
File: resource.py Project: fahdely/rectenv
 def get_service_list(self):
     ret=[]
     if self._which("systemctl"):
         p = subprocess.Popen("systemctl --all --full list-units | grep \.service", stdout=subprocess.PIPE, shell=True)
         (po, pe) = p.communicate()
         p.wait()
         if po is not None and len(po)>0:
             appar = po.split("\n")
             for appln in appar:
                 sv = ""
                 stcnt = -1
                 stapp = ""
                 ar = appln.split(" ")
                 for k in ar:
                     if len(k)>0:
                         if stcnt == -1:
                             if k.endswith(".service"):
                                 sv+=k[0:len(k)-8]
                                 stcnt+=1
                             else:
                                 sv+=k
                         else:
                             stcnt+=1
                             if stcnt==3:
                                 stapp=k
                 if stcnt != -1:
                      if stapp == "running":
                         st = 4
                     else:
                         st = 1
                     ret.append({"Name":sv,"Label":"","Status":st})
     else:
         #SYSVINIT
         for x in utils.path_list('/etc/init.d'):
             if x.lower()!="rc" and x.lower()!="rcs" and x.lower()!="halt" and x.lower()!="reboot" and x.lower()!="single":
                 xp = "/etc/init.d/" + x
                 st = utils.path_stat(xp)
                 if bool(st.st_mode & stat.S_IXUSR) or bool(st.st_mode & stat.S_IXGRP) or bool(st.st_mode & stat.S_IXOTH):                        
                     appf = utils.file_open("/etc/init.d/" + x)
                     apps = appf.read()
                     appf.close()                        
                     if "status)" in apps or "status|" in apps:  
                         p = subprocess.Popen("/etc/init.d/" + x + " status", stdout=subprocess.PIPE, shell=True)
                         (po, pe) = p.communicate()
                         p.wait()
                         if po is not None and len(po)>0:
                             st = 999
                             if "running" in po.lower() or "started" in po.lower():
                                 st = 4
                             elif "not running" in po.lower() or "not started" in po.lower() or "failed" in po.lower():
                                 st = 1
                             ret.append({"Name":x,"Label":"","Status":st})
     return ret
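
The column parsing above walks systemctl's human-oriented output by hand. An untested sketch of the same idea, assuming a systemd version that supports the --plain and --no-legend flags (list_services is a hypothetical name):

import subprocess

def list_services():
    # columns: UNIT LOAD ACTIVE SUB DESCRIPTION; SUB ('running', 'dead', ...) is the 4th
    out = subprocess.check_output(
        ['systemctl', 'list-units', '--type=service', '--all',
         '--plain', '--no-legend'],
        universal_newlines=True)
    ret = []
    for line in out.splitlines():
        cols = line.split()
        if len(cols) >= 4 and cols[0].endswith('.service'):
            status = 4 if cols[3] == 'running' else 1
            ret.append({'Name': cols[0][:-len('.service')],
                        'Label': '', 'Status': status})
    return ret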
Code example #5
File: sharedmem.py Project: fahdely/rectenv
def init_path():
    if not utils.path_exists(SHAREDMEM_PATH):
        utils.path_makedir(SHAREDMEM_PATH)
    else:
        # Delete all the files
        lst = utils.path_list(SHAREDMEM_PATH)
        for fname in lst:
            try:
                if fname[0:7] == "stream_":
                    if utils.path_exists(SHAREDMEM_PATH + utils.path_sep +
                                         fname):
                        utils.path_remove(SHAREDMEM_PATH + utils.path_sep +
                                          fname)
            except Exception:
                pass
Code example #6
 def _cpmv(self, tp, fs, fd, replace):
     bok = True
     if utils.path_isdir(fs):
         if not utils.path_exists(fd):
             utils.path_makedirs(fd)
             if tp=="copy":
                 self._agent_main.get_osmodule().fix_file_permissions("COPY_DIRECTORY",fd, fs)
             elif tp=="move":
                 self._agent_main.get_osmodule().fix_file_permissions("MOVE_DIRECTORY",fd, fs)
         lst=None
         try:
             lst=utils.path_list(fs)
             for fname in lst:
                 b = self._cpmv(tp, fs + utils.path_sep + fname, fd + utils.path_sep + fname, replace)
                 if bok is True:
                     bok = b
         except Exception:
             bok=False
         if tp=="move":
             try:
                 utils.path_remove(fs)
             except Exception:
                 bok=False
     else:
         b=True
         if utils.path_exists(fd):
             if replace is True:
                 try:
                     utils.path_remove(fd)
                 except Exception:
                     bok = False
                     b = False
             else:
                 b = False
         if b is True:
             try:
                 if tp=="copy":
                     utils.path_copy(fs, fd)
                     self._agent_main.get_osmodule().fix_file_permissions("COPY_FILE",fd, fs)
                 elif tp=="move":
                     utils.path_move(fs, fd)
                     self._agent_main.get_osmodule().fix_file_permissions("MOVE_FILE",fd)
             except Exception:
                 bok=False
     return bok
Code example #7
 def _set_permissions(self, fs, params, recursive):
     bok = True
     if not utils.path_islink(fs):
         try:
             self._osnative.set_file_permissions(fs,params)
         except Exception:
             bok=False
         if recursive and utils.path_isdir(fs):
             lst=None
             try:
                 lst=utils.path_list(fs)
                 for fname in lst:
                     b = self._set_permissions(fs + utils.path_sep + fname, params, recursive)
                     if bok is True:
                         bok = b
             except Exception:
                 bok=False
     return bok
Code example #8
File: resource.py Project: fahdely/rectenv
 def _get_service_list(self):
     import xml.etree.ElementTree as ET
     ret={}
     path='/System/Library/LaunchDaemons'
     for x in utils.path_list(path):
         try:
             bok=False
             tree = ET.parse(path + "/" + x)
             root = tree.getroot()
             for node in root:  # avoid shadowing the builtin 'dict'
                 if node.tag == "dict":
                     for child in node:
                         if bok==True:
                             if child.tag.lower()=="string":
                                 ret[child.text]=path + "/" + x
                                 break
                         if child.tag.lower()=="key" and child.text.lower()=="label":
                             bok=True
                 if bok==True:
                     break     
         except Exception:
             pass
     return ret
Code example #9
File: resource.py Project: fahdely/rectenv
 def get_task_list(self):
     import pwd
     ret = []
     for x in utils.path_list('/proc') :
         if x.isdigit():
             try:
                 itm={}
                 #PID
                 itm["PID"]=long(x)
                 #Name
                 f = utils.file_open("/proc/%s/stat" % x)
                 try:
                     itm["Name"] = f.read().split(' ')[1].replace('(', '').replace(')', '')
                 finally:
                     f.close()
                 #Memory
                 f = utils.file_open("/proc/%s/statm" % x)
                 try:
                     vms, rss = f.readline().split()[:2]
                     itm["Memory"] = long(rss) * long(self._PAGESIZE)
                     #int(vms) * _PAGESIZE)
                 finally:
                     f.close()
                 #Owner
                 f = utils.file_open("/proc/%s/status" % x)
                 try:
                     for line in f:
                         if line.startswith('Uid:'):
                              r = line.split()
                              itm["Owner"] = pwd.getpwuid(int(r[1])).pw_name
                              break
                 finally:
                     f.close()
                 ret.append(itm)
              except Exception:
                  pass
     return ret
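
The memory figure above multiplies the resident page count from /proc/<pid>/statm by a page size held in self._PAGESIZE, which is defined elsewhere in this project. As an assumption about what that attribute holds, one portable way to obtain the value:

import os

PAGESIZE = os.sysconf('SC_PAGE_SIZE')  # bytes per memory page, e.g. 4096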
Code example #10
def run():
    X = []
    Y_rec_cwm = []
    Y_rec_nn = []
    Y_width_cwm = []
    Y_width_nn = []
    Y_err_cwm = []
    Y_err_nn = []

    paths = path_list('SRC_input2output/')
    for path in paths:
        f = load(path)
        z = f['info']['z']
        if z in X:
            # skip paths that repeat a z value already processed
            continue
        print(f'working on z={z} in {path}')

        # get 3-axis outputs of cwm, nn
        outputs_cwm = f['outputs_cwm']
        outputs_nn = f['outputs_nn']

        # filter desired ranges
        filtered_cwm = [filter_nsigma(outputs, n=2) for outputs in outputs_cwm]
        filtered_nn = [filter_nsigma(outputs, n=2) for outputs in outputs_nn]

        # get rec
        rec_cwm = [np.mean(output) for output in filtered_cwm]
        rec_nn = [np.mean(output) for output in filtered_nn]

        # get std
        std_cwm = [np.std(output) for output in filtered_cwm]
        std_nn = [np.std(output) for output in filtered_nn]

        # get std error
        se_cwm = [standard_error(output) for output in filtered_cwm]
        se_nn = [standard_error(output) for output in filtered_nn]

        # make x, y axis data
        X.append(z)
        Y_rec_cwm.append(rec_cwm)
        Y_rec_nn.append(rec_nn)
        Y_width_cwm.append(std_cwm)
        Y_width_nn.append(std_nn)
        Y_err_cwm.append(se_cwm)
        Y_err_nn.append(se_nn)

    # make np array, and transpose
    X = np.array(X)
    Y_rec_cwm = np.array(Y_rec_cwm)
    Y_rec_nn = np.array(Y_rec_nn)
    Y_width_cwm = np.array(Y_width_cwm).T
    Y_width_nn = np.array(Y_width_nn).T
    Y_err_cwm = np.array(Y_err_cwm).T
    Y_err_nn = np.array(Y_err_nn).T

    Y_residual_cwm = np.array(Y_rec_cwm).T
    Y_residual_cwm[2] += -X
    Y_residual_nn = np.array(Y_rec_nn).T
    Y_residual_nn[2] += -X

    # sigmoid correction

    # draw plots: true, rec position
    fig, axes = plt.subplots(1, 3, figsize=(14, 4))
    for axis in range(3):
        # plot cwm, nn
        if axis == 2:
            axes[axis].errorbar(X, Y_residual_cwm[axis] + X, yerr=Y_err_cwm[axis], label='cwm with correction', color='black', markersize=3, linewidth=1, capsize=3, fmt='o')
            axes[axis].errorbar(X, Y_residual_nn[axis] + X, yerr=Y_err_nn[axis], label='neural network', color='r', markersize=3, linewidth=1, capsize=3, fmt='o')
        else:
            axes[axis].errorbar(X, Y_residual_cwm[axis], yerr=Y_err_cwm[axis], label='cwm with correction', color='black', markersize=3, linewidth=1, capsize=3, fmt='o')
            axes[axis].errorbar(X, Y_residual_nn[axis], yerr=Y_err_nn[axis], label='neural network', color='r', markersize=3, linewidth=1, capsize=3, fmt='o')

        # axes properties
        axis_name = ['x', 'y', 'z'][axis]
        axes[axis].set_xlabel(r'$z_{src}$ (mm)')
        axes[axis].set_ylabel(r'$%s_{rec}$ (mm)' % axis_name)
        axes[axis].grid()
        axes[axis].legend(fontsize=8, loc='upper left')
        if axis == 2:
            axes[axis].set_ylim([-1600, 1600])
        else:
            axes[axis].set_ylim([-100, 100])
    plt.tight_layout()
    plt.savefig('1_rec.png')
    plt.close()

    # draw plots: residual
    fig, axes = plt.subplots(1, 3, figsize=(14, 4))
    for axis in range(3):
        # plot cwm, nn
        axes[axis].errorbar(X, Y_residual_cwm[axis], yerr=Y_err_cwm[axis], label='cwm with correction', color='black', markersize=3, linewidth=1, capsize=3, fmt='o')
        axes[axis].errorbar(X, Y_residual_nn[axis], yerr=Y_err_nn[axis], label='neural network', color='r', markersize=3, linewidth=1, capsize=3, fmt='o')

        # axes properties
        axis_name = ['x', 'y', 'z'][axis]
        axes[axis].set_xlabel(r'$z_{src}$ (mm)')
        axes[axis].set_ylabel(r'$%s_{rec}-%s_{src}$ (mm)' % (axis_name, axis_name))
        axes[axis].grid()
        axes[axis].legend(fontsize=8, loc='upper left')
        if axis == 2:
            axes[axis].set_ylim([-150, 150])
        else:
            axes[axis].set_ylim([-100, 100])
    plt.tight_layout()
    plt.savefig('2_residual.png')
    plt.close()

    # draw plots: width
    fig, axes = plt.subplots(1, 3, figsize=(14, 4))
    for axis in range(3):
        # plot cwm, nn
        axes[axis].errorbar(X, Y_width_cwm[axis], yerr=Y_err_cwm[axis], label='cwm with correction', color='black', markersize=3, linewidth=1, capsize=3, fmt='o')
        axes[axis].errorbar(X, Y_width_nn[axis], yerr=Y_err_nn[axis], label='neural network', color='r', markersize=3, linewidth=1, capsize=3, fmt='o')

        # axes properties
        axis_name = ['x', 'y', 'z'][axis]
        axes[axis].set_xlabel(r'$z_{src}$ (mm)')
        axes[axis].set_ylabel('%s width (mm)' % axis_name)
        axes[axis].grid()
        axes[axis].set_ylim([150, 330])
        axes[axis].legend(fontsize=8, loc='upper left')
    plt.tight_layout()
    plt.savefig('3_width.png')
    plt.close()

    # draw plots: improvement
    fig, axes = plt.subplots(1, 3, figsize=(14, 4))
    for axis in range(3):
        # plot cwm, nn
        improvement = (Y_width_nn[axis] - Y_width_cwm[axis]) / Y_width_cwm[axis] * 100
        mean_imp = np.mean(improvement)
        axes[axis].plot(X, improvement, marker='.', color='black')
        axes[axis].axhline(y=mean_imp, color='blue', linestyle=':')

        # axes properties
        axis_name = ['x', 'y', 'z'][axis]
        axes[axis].set_xlabel(r'$z_{src}$ (mm)')
        axes[axis].set_ylabel(r'$(\sigma_{%s, nn}-\sigma_{%s, cwm})/\sigma_{%s, cwm}$ (%%)' % (axis_name, axis_name, axis_name))
        axes[axis].set_ylim([-40, 15])
        axes[axis].grid()

        axes[axis].text(0, mean_imp-2, 'mean=%.1f%%' % mean_imp, ha='left', va='top', color='blue')
    plt.tight_layout()
    plt.savefig('4_improvement.png')
    plt.close()
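
filter_nsigma and standard_error are used above but not shown on this page. A plausible sketch of what they compute, under the usual definitions (keep samples within n standard deviations of the mean; standard error of the mean = sigma / sqrt(N)); the real implementations live elsewhere in this project:

import numpy as np

def filter_nsigma(samples, n=2):
    # keep only samples within n standard deviations of the mean
    samples = np.asarray(samples)
    mu, sigma = np.mean(samples), np.std(samples)
    return samples[np.abs(samples - mu) <= n * sigma]

def standard_error(samples):
    # standard error of the mean: sigma / sqrt(N)
    samples = np.asarray(samples)
    return np.std(samples) / np.sqrt(len(samples))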
Code example #11
    def req_list(self, cinfo, params):
        path = agent.get_prop(params, 'path', None)
        only_dir = agent.str2bool(agent.get_prop(params, "onlyDir", "false"))
        only_file = agent.str2bool(agent.get_prop(params, "onlyFile", "false"))
        app_name = agent.get_prop(params, "app")
        #image_info = agent.str2bool(agent.get_prop(params, "imageInfo", "false"))
        ptfilter = agent.get_prop(params, "filter", None)
        ptfilter_ignorecase = agent.str2bool(agent.get_prop(params, "filterIgnoreCase", "false"))
        ptfilterList = agent.get_prop(params, "filterList", None)
        refilter = None
        if ptfilter is not None:
            # honor filterIgnoreCase: compile case-insensitively only when requested
            if ptfilter_ignorecase:
                refilter = re.compile(ptfilter, re.IGNORECASE)
            else:
                refilter = re.compile(ptfilter)
        arfilterList = None
        if ptfilterList is not None:
            arfilterList = json.loads(ptfilterList)
        
        arret = []
        if path == "$":
            if app_name is None:
                prms = self.get_permission(cinfo)
            else:
                prms = self.get_permission(cinfo, app_name)
            if prms["fullAccess"]:
                ar = self._osnative.get_resource_path()
                for i in range(len(ar)):
                    itm = {}
                    app = ar[i]
                    itm["Name"] = u"D:" + app["Name"]
                    if "Size" in app:
                        itm["Length"] = app["Size"]
                    arret.append(itm)
            else:
                for permpt in prms["paths"]:
                    arret.append({'Name': 'D:' + permpt["name"]})
                    
        else:
            lst = None
            options = {}
            if app_name is not None:
                options["app"] = app_name
            pdir = self.check_and_replace_path(cinfo, path, self.OPERATION_VIEW, options)
            if not utils.path_isdir(pdir):
                raise Exception("Permission denied or read error.")
            try:
                lst = utils.path_list(pdir)
            except Exception:
                raise Exception("Permission denied or read error.")
            # Load the listing
            for fname in lst:
                # Handled this way to avoid errors, but for non-UTF8 filenames other info such as size and modification date cannot be recovered
                if not isinstance(fname, unicode):
                    fname=fname.decode("utf8","replace")
                
                if pdir==utils.path_sep:
                    fp = pdir + fname
                else:
                    fp = pdir + utils.path_sep + fname
                if (self._osnative.is_file_valid(fp)) and ((not only_dir and not only_file) or (only_dir and utils.path_isdir(fp)) or (only_file and not utils.path_isdir(fp))):
                    bok = True
                    if refilter is not None:
                        bok = refilter.match(fname)
                    if bok and arfilterList is not None:
                        for appnm in arfilterList:
                            bok = False
                            if ptfilter_ignorecase:
                                bok = (fname.lower()==appnm.lower())
                            else:  
                                bok = (fname==appnm)
                            if bok:
                                break
                    if bok is True:
                        self._append_to_list(arret, pdir, fname)
                        # if (image_info==True): TODO handle Width and Height for image-type files

        # Sort by name
        arret = sorted(arret, key=lambda k: k['Name'].lower())
         
        jsret = {'items' : arret, 'permissions': {"apps":{}}}
        if path!="$" and app_name is None:
            a = jsret["permissions"]["apps"]
            paths = self.get_permission_path(cinfo, u"#FILESYSTEM://" + path, {"app":"texteditor" ,"check_exists": False})
            if len(paths)>0:
                a["texteditor"]={}
            paths = self.get_permission_path(cinfo, u"#FILESYSTEM://" + path, {"app":"logwatch" ,"check_exists": False})
            if len(paths)>0:
                a["logwatch"]={}
            
         
        return json.dumps(jsret)
Code example #12
def get_nn_outputs(model_directory, net_type, inputs, epoch, gpu=True):
    if net_type == 'Net':
        net = nets.Net()
    elif net_type == 'Net2c':
        net = nets.Net2c()
    elif net_type == 'CNN1c':
        net = nets.CNN1c()
    elif net_type == 'CNN2c':
        net = nets.CNN2c()
    else:
        raise ValueError('invalid net_type: ' + net_type)

    if gpu and torch.cuda.is_available():
        device = torch.device('cuda')
        # data parallelism
        if torch.cuda.device_count() > 1:
            print('currently using ' + str(torch.cuda.device_count()) +
                  ' cuda devices.')
            net = nn.DataParallel(net)
    else:
        device = torch.device('cpu')

    net.to(device)

    # load state
    model = torch.load(path_list(model_directory +
                                 '/models/epoch_%05i/' % epoch,
                                 filter='pt')[-1],
                       map_location=device)
    net.load_state_dict(model)

    # convert inputs to tensor and move to device (Tensor.to is not in-place)
    inputs = torch.FloatTensor(inputs)
    inputs = inputs.to(device)

    # get outputs
    outputs = net(inputs).detach().cpu().clone().numpy().T
    outputs *= 1000

    # MC data -> raw data transform
    # correction related to attenuation length from cwm (2018 -> 2013)
    # r' = [(a-b)/R * r + b] * r                    -> [c * r + d] * r
    # z' = [(a-b)/R * r + b] * z                    -> [c * r + d] * z
    # r = [R/2(a-b)] * [-b + sqrt(b^2 + 4(a-b)/R * r')]  -> (1/2c) * (-d + sqrt(d^2 + 4cr'))
    # z = 1/(c * r + d) * z'
    # weight2 = 0.8784552 - 0.0000242758 * __perp(reco_vertex)  # base point -> 2018
    # c = -0.0000242758
    c = (0.8375504 - 0.8784552) / 1685
    d = 0.8784552
    outputs_r = np.sqrt(outputs[0]**2 + outputs[1]**2)
    outputs_r0 = 1 / (2 * c) * (-d + np.sqrt(np.square(d) + 4 * c * outputs_r))
    outputs *= 1 / (c * outputs_r0 + d)

    # raw data -> source data transform
    # weight2 = 1.09723 + (1.04556 - 1.09723) / 1685 * __perp(reco_vertex)    # base point -> 2013
    c = (1.04556 - 1.09723) / 1685
    d = 1.09723
    outputs_r0 = np.sqrt(outputs[0]**2 + outputs[1]**2)
    outputs *= (c * outputs_r0 + d)

    return outputs
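
The radial inversion in the comments follows from r' = (c*r + d)*r: solving the quadratic c*r**2 + d*r - r' = 0 for r gives r = (-d + sqrt(d**2 + 4*c*r')) / (2*c), which is the expression used for outputs_r0. A quick numerical round-trip check of that algebra (the constants are copied from the function above):

import numpy as np

c = (0.8375504 - 0.8784552) / 1685
d = 0.8784552

r = np.linspace(0.0, 1600.0, 9)                              # true radius (mm)
r_prime = (c * r + d) * r                                    # forward weighting
r_back = (-d + np.sqrt(d**2 + 4 * c * r_prime)) / (2 * c)    # inversion
assert np.allclose(r, r_back)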
Code example #13
def run():
    mkdir('SRC_json2input/')

    # multiprocessing
    p = Pool(processes=40)
    p.map(job, path_list(dir='SRC_prd2json/', filter='.json'))
    p.close()
    p.join()
Code example #14
def main():
    # Argument configuration
    parser = argparse.ArgumentParser()

    # general settings
    parser.add_argument('--root',
                        type=str,
                        default='MC',
                        help='MC root directory')
    parser.add_argument('--input',
                        type=str,
                        default='prompt',
                        help='input type (prompt, delayed, all)')
    parser.add_argument('--output',
                        type=str,
                        default='prompt',
                        help='output type (prompt, delayed, all)')
    parser.add_argument('--mode',
                        type=str,
                        required=True,
                        help='mode for input (hit, time)')

    # control settings
    parser.add_argument('--num_dataset',
                        type=int,
                        default=0,
                        help='number of dataset, if 0, use all')
    parser.add_argument('--dead_pmt',
                        type=int,
                        default=0,
                        help='whether dead PMTs are enabled (0 or 1).')
    parser.add_argument('--fast',
                        type=int,
                        default=0,
                        help='for testing, skip filtering.')

    # learning settings
    parser.add_argument('--net_type',
                        type=str,
                        required=True,
                        help='network to use (Net, Net2c, CNN1c, CNN2c)')
    parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
    parser.add_argument('--batch',
                        type=int,
                        default=128,
                        help='batch size, multiplied by cuda device number')
    parser.add_argument('--worker',
                        type=int,
                        default=40,
                        help='num_worker of dataloader')
    parser.add_argument('--epoch',
                        type=int,
                        default=40,
                        help='number of epochs')

    # optional settings
    parser.add_argument('--text',
                        type=str,
                        default='',
                        help='additional text to test save directory')

    # Parse arguments
    args = parser.parse_args()

    # general settings
    root_directory = args.root
    input_type = args.input
    output_type = args.output
    mode = args.mode

    # control settings
    num_dataset = args.num_dataset
    dead_pmt = args.dead_pmt
    fast = args.fast

    # learning settings
    net_type = args.net_type
    lr = args.lr
    batch_size = args.batch * (torch.cuda.device_count()
                               if torch.cuda.is_available() else 1)
    num_worker = args.worker
    num_epochs = args.epoch

    # optional settings
    text = args.text

    # Make save directory
    save_directory = datetime.datetime.now().strftime("%Y%m%d-%H%M")
    save_directory += '-' + root_directory
    save_directory += '-' + mode
    save_directory += '-' + net_type
    save_directory += '-' + input_type + '_' + output_type
    save_directory += '-e' + str(num_epochs)
    if dead_pmt:
        save_directory += '-dead'
    if num_dataset:
        save_directory += '-d' + str(int(num_dataset / 1000)) + 'k'
    if fast:
        save_directory += '-fast'
    if text:
        save_directory += '-' + text
    print('save directory: ' + save_directory)

    # Load dataset paths
    print('loading path list')
    paths = path_list(root_directory, filter='.json', shuffle=True)
    if not fast:
        print('zero input data filtering')
        paths = filter_zero_counts(paths, input_type)
    if num_dataset:
        print('data size limit:', num_dataset)
        paths = paths[:num_dataset]

    # Prepare trainset
    print('preparing trainset')
    trainpaths = paths[:int(len(paths) * 0.8)]
    trainset = JsonDataset(trainpaths, mode, net_type, input_type, output_type,
                           dead_pmt)
    trainloader = DataLoader(trainset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=num_worker)
    save(trainpaths, save_directory + '/trainpaths.list')

    # prepare valiset
    print('preparing valiset')
    valipaths = paths[int(len(paths) * 0.8):int(len(paths) * 0.9)]
    valiset = JsonDataset(valipaths, mode, net_type, input_type, output_type,
                          dead_pmt)
    valiloader = DataLoader(valiset,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=num_worker)
    vali_inputs, vali_labels = load_all(valiloader)
    save(valipaths, save_directory + '/valipaths.list')
    save(vali_inputs, save_directory + '/vali_inputs.tensor')
    save(vali_labels, save_directory + '/vali_labels.tensor')

    # prepare testset
    print('preparing testset')
    testpaths = paths[int(len(paths) * 0.9):]
    testset = JsonDataset(testpaths, mode, net_type, input_type, output_type,
                          dead_pmt)
    testloader = DataLoader(testset,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=num_worker)
    test_inputs, test_labels = load_all(testloader)
    save(testpaths, save_directory + '/testpaths.list')
    save(test_inputs, save_directory + '/test_inputs.tensor')
    save(test_labels, save_directory + '/test_labels.tensor')

    # Network, criterion, optimizer
    print('creating net, criterion, optimizer')
    if net_type == 'Net':
        net = nets.Net()
    elif net_type == 'Net2c':
        net = nets.Net2c()
    elif net_type == 'CNN1c':
        net = nets.CNN1c()
    elif net_type == 'CNN2c':
        net = nets.CNN2c()
    else:
        raise ValueError('invalid net type: ' + net_type)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(net.parameters(), lr=lr)

    # Data parallelism
    if torch.cuda.device_count() > 1:
        print('currently using', str(torch.cuda.device_count()),
              'cuda devices.')
        net = nn.DataParallel(net)
    net = net.float()  # cast to float; avoids a runtime error when using data parallelism
    vali_inputs = vali_inputs.float()
    vali_labels = vali_labels.float()

    # GPU usage: move data to device
    print('moving net and data to device')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net.to(device)
    vali_inputs = vali_inputs.to(device)
    vali_labels = vali_labels.to(device)

    # Configuration summary
    config = {
        'root_directory': root_directory,
        'input_type': input_type,
        'output_type': output_type,
        'mode': mode,
        'num_dataset': num_dataset if num_dataset else 'full load',
        'dead_pmt': dead_pmt,
        'fast': fast,
        'net_type': net_type,
        'lr': lr,
        'batch_size': batch_size,
        'num_worker': num_worker,
        'num_epochs': num_epochs,
        'model': {l[0]: str(l[1])
                  for l in net.named_children()},
        'save directory': save_directory,
    }
    save(config, save_directory + '/configuration.json')

    # start training
    start_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    loss_history = {}
    for epoch in range(num_epochs):
        for i, data in enumerate(trainloader):
            # Get inputs and labels
            train_inputs, train_labels = data

            # Data parallelism: Runtime error handling
            train_inputs = train_inputs.float()
            train_labels = train_labels.float()

            # GPU usage: Move data to device
            train_inputs = train_inputs.to(device)
            train_labels = train_labels.to(device)

            # Get outputs
            optimizer.zero_grad()
            train_outputs = net(train_inputs)

            # Evaluate loss and optimize (update network)
            loss = criterion(train_outputs, train_labels)
            loss.backward()
            optimizer.step()

            # Get validation results
            if i % 100 == 0:
                vali_outputs = net(vali_inputs)
                vali_outputs = vali_outputs.detach().cpu().clone().numpy()
                try:
                    # vali_labels is a tensor only on the first pass;
                    # afterwards it is already a numpy array
                    vali_labels = vali_labels.detach().cpu().clone().numpy()
                except AttributeError:
                    pass

                vali_dis = (vali_outputs - vali_labels) * 1000
                vali_sigma = np.std(vali_dis, axis=0)
                vali_mu = np.mean(vali_dis, axis=0)
                vali_loss = np.mean(vali_dis**2)

                dframe = {
                    'axis': ['x', 'y', 'z'],
                    'vali_sigma': vali_sigma,
                    'vali_mu': vali_mu,
                    'vali[0]': vali_outputs[0],
                    'labels[0]': vali_labels[0]
                }
                dframe = pd.DataFrame(dframe).T

                print('===========================================')
                print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                      'started at:', start_time)
                print('epoch: %02i (%04i/%i)' % (epoch, i, len(trainloader)))
                print('train loss(mm2)=%.1f, vali loss(mm2)=%.1f' %
                      (loss.item() * 1000 * 1000, vali_loss))
                print(dframe)

                if epoch not in loss_history:
                    loss_history[epoch] = {}
                loss_history[epoch][i] = loss.item()

                save(loss_history, save_directory + '/loss_history.json')
                mkdir(f'{save_directory}/models/epoch_{epoch:05}/')

                if torch.cuda.device_count() > 1:
                    # with data parallelism, save the wrapped model via net.module.state_dict()
                    torch.save(
                        net.module.state_dict(),
                        f'{save_directory}/models/epoch_{epoch:05}/{i:05}.pt')
                else:
                    torch.save(
                        net.state_dict(),
                        f'{save_directory}/models/epoch_{epoch:05}/{i:05}.pt')