Example #1
def main():
    model = BehaviorCloneNet(12)
    #model = CarModel()
    model.to(device)  # 'device' (cuda/cpu torch.device) is assumed to be defined at module level
    ckpt_path = './checkpoints/deepwatch_13.pth'
    checkpoint = torch.load(ckpt_path)
    log_dir = './logging/'
    framesz = 360
    batch_size = 32
    lr = 0.001
    epochs = 100

    def signal_handler(sig, frame):
        # Exit cleanly when training is interrupted with Ctrl+C
        print('You pressed Ctrl+C!')
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    
    transform_train = Compose([
        ToTensor(),
        Normalize(mean=[0.485, 0.456, 0.406],
                  std=[0.229, 0.224, 0.225])
    ])
    
    train_data = LogLoader(log_dir, framesz, transform_train)
    val_data = LogLoader(log_dir, framesz, transform_train, is_val=True)
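    # DataLoaders for training and validation; pin_memory speeds up host-to-GPU copies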
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, 
                                               shuffle=True, num_workers=16, 
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_data, batch_size=batch_size, 
                                               shuffle=False, num_workers=16, 
                                               pin_memory=True)

    criterion = nn.MSELoss().to(device)
    #criterion = nn.L1Loss().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # Restore model weights and optimizer state from the checkpoint
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])


    print("Begining training...")
    for epoch in range(0, epochs):  # replace 0 with checkpoint['epoch'] to resume epoch numbering
        train(train_loader, model, criterion, optimizer, epoch)
        validate(val_loader, model, criterion, epoch)

        if (epoch + 1) % 5 == 0:
            print("Saving Checkpoint")
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }, ckpt_path)
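
The commented-out checkpoint['epoch'] hint in the loop above suggests resuming the epoch count from the saved checkpoint. A minimal sketch of that variant (start_epoch is an illustrative name; it assumes the checkpoint keys saved in this example):

    start_epoch = checkpoint.get('epoch', -1) + 1  # continue after the last saved epoch
    for epoch in range(start_epoch, epochs):
        train(train_loader, model, criterion, optimizer, epoch)
        validate(val_loader, model, criterion, epoch)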
Example #2
    from jupyterplots import JupyterPlots

    colors = sns.color_palette()

    fig_x, fig_y = JupyterPlots()  # default figure width/height, used for figsize below

    prefix = 'data/'

    fp = int(sys.argv[1])
    rho = sys.argv[2]

    label = rf'$f^P={fp},\rho={rho}$'

    flog = prefix + f'log_{fp}_{rho}.lammps.log'
    ll = LogLoader(flog, remove_chunk=0, merge_data=True)

    fig, axarr = plt.subplots(4, sharex=False, figsize=[fig_x, 4.5 * fig_y])

    ts = ll.data['Step']
    Press = ll.data['c_press']

    l1 = 'LAMMPS'
    l2 = 'Python'
    tcut = 1000000
    axarr[0].plot(ts[ts > tcut],
                  Press[ts > tcut],
                  'o',
                  color=colors[0],
                  label=label)
Example #3
    template_dir = "../template/"
    output_dir   = '../logmatch_result/' # The result directory
    
    log_name = 'HDFS_2k.log' # The input log file path
    template_name = log_name + '_templates.csv' # The event template file path

    log_format   = '<Date> <Time> <Pid> <Level> <Component>: <Content>' # HDFS log format
    n_workers    = 1 # The number of workers in parallel
    
    print( "start set sparkSession..." )
    spark = SparkSession.builder.appName( "sparkStream" ).getOrCreate()
    lines = spark.readStream.text( logfile_dir )

    print( "start to load log_format..." )
    # read log and analyse template
    log_load = LogLoader(log_format, n_workers = n_workers)
    headers, regex = log_load.headers, log_load.regex
    print( "regex type: ", type(regex) )
    print( "headers type: ", type(headers) )
    print( "headers: ", headers)  #  ['Date', 'Time', 'Pid', 'Level', 'Component', 'Content']
    split_line = udf( lambda line: formalize_line(line, regex, headers), StringType() )
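    # formalize_line (defined elsewhere) is expected to join the parsed header fields
    # with the '--#--' separator that split_msg below splits on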


    def split_msg(line):
        line_list = line.split("--#--")
        # print( "line len: ", len(line_list) )
        res = ''
        if len(line_list) < 6:
            pass
        else:
            res = line_list[5]  # index 5 is the <Content> field
        return res
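
A hedged sketch of how the pieces above could be wired together on the stream ('value' is the default column name produced by readStream.text; 'parsed' and 'get_content' are illustrative names, and the actual downstream logic is not shown in this excerpt):

    parsed = lines.withColumn('formal', split_line(lines['value']))
    get_content = udf(split_msg, StringType())
    parsed = parsed.withColumn('content', get_content(parsed['formal']))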
Example #4
    prefix = 'data/'

    fp = int(sys.argv[1])

    fdump = prefix + f'dump_{fp}.lammpstrj'

    # Re-use a cached pickle of the parsed dump if present; otherwise parse it and cache it
    if os.path.isfile(fdump + '.pkl'):
        dumpdata = load_obj(fdump, pstatus=True)
    else:
        duf = DumpFile(fdump, voronoi_flag=False, cg_flag=False)
        dumpdata = duf.read_dump()
        save_obj(dumpdata, fdump, pstatus=True)

    flog = prefix + f'log_{fp}.lammps.log'
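    # Parse the matching LAMMPS log (merge_data=False presumably keeps the chunked log data unmerged)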
    ll = LogLoader(flog, remove_chunk=None, merge_data=False)

    N = len(dumpdata)
    ncount = N

    roundoff = 2e-5

    # Per-frame pressure values and error estimates, filled in the loop below
    Ps = np.empty([ncount], float)
    Perrs = np.empty([ncount], float)

    for i, data in enumerate(dumpdata[:ncount]):

        Ps[i] = 0
        Perrs[i] = 0

        coords = data['coords']
Example #5
def ideal_pressure(fp,rho,d=2,Dr=3.):
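    """Return rho plus the ideal swim pressure (see ideal_swim_press)."""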

    return rho+ideal_swim_press(fp,rho,d=d,Dr=Dr)


if __name__ == "__main__":

    from logloader import LogLoader
    import matplotlib.pyplot as plt
    import numpy as np
    
    #inpath = '../2020_04_07/rdotf_naive_pressure/data/'
    inpath = '../2020_04_08/winkler_pressure/data/'

    fname = inpath + 'log_100_0.2.lammps.log'

    ll = LogLoader(fname,remove_chunk=0,merge_data=True)

    ts = ll.data['Step']
    press = ll.data['c_press']

    tcut = 1000000
    press = press[ts>=tcut]

    Ts = np.linspace(0,5000,num=5001,endpoint=True,dtype=int)
    sigma2s = np.empty([len(Ts)],float)

    cts = None
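    # Sweep the T values, re-using the correlation estimates (cts) returned by each call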
    for i,T in enumerate(Ts):
        
        sigma2s[i],cts = standard_error_naive_estimate(T,press,corrs = cts)
Example #6
    prefix1 = 'data2/'
    tcut = -1
    fps = np.array([1, 5, 10, 20, 40, 60, 80, 100], int)

    # density (in lj units)
    rho = '0.7'

    fig, axarr = plt.subplots(2,
                              sharex=True,
                              figsize=[figsize[0], figsize[0] * 2])

    fp = 100

    fname = f'pressure_{fp}_{rho}'
    ll = LogLoader(prefix1 + f'log_{fp}_{rho}.lammps.log')
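    # Columns read below: timestep (Step), MSD component (c_mymsdd[4]), pressure (c_press) and diffusion coefficient (v_Diff)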

    ts = ll.data['Step']
    RMSs = ll.data['c_mymsdd[4]']
    Ps = ll.data['c_press']
    Ds = ll.data['v_Diff']

    #data = np.loadtxt(prefix1 + fname + '.txt.fixprint')

    #ts = data[:,0]
    #Ts = data[:,1]
    #Ds = data[:,2]
    #Ps = data[:,3]
    #tcut = 200000
    print(ts)
    #axarr[0].plot(ts[1:],np.gradient(RMSs,ts)[1:]/4e-5,'o',label=rf'$f_p={fp}$')