Example #1
def train(net, train_loader, _run, lr, total_its, avg_window):
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    loss_fn = torch.nn.CrossEntropyLoss()

    avg_loss = MovingAverage(avg_window)
    avg_accuracy = MovingAverage(avg_window)
    
    for it in range(total_its):
        for batch_idx, (batch_data, batch_target) in enumerate(train_loader):
            data = batch_data
            out = None
            # Feed the sequence one time step at a time; only the last output is used.
            for i in range(data.shape[1]):
                out = net(data[:, i].to(torch.float32))
            loss = loss_fn(out, batch_target.to(device=out.device, dtype=torch.long))

            _, preds = out.max(1)
            acc = (preds == batch_target.to(out.device)).float().mean()
            overall_step = len(train_loader) * it + batch_idx
            print("step:", overall_step, "/", len(train_loader) * total_its,
                  "batch loss:", loss.item(), "batch accuracy:", acc.item())
            
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            avg_loss += loss.item()
            avg_accuracy += acc.item()

            _run.log_scalar("loss", float(avg_loss))
            _run.log_scalar("accuracy", float(avg_accuracy))

            net.reset()
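The training loop above assumes a MovingAverage helper that accepts samples via += and is read back with float(). The class itself is not part of this example; a minimal sketch of that interface, assuming a fixed-size window over the most recent samples:

from collections import deque

class MovingAverage:
    """Fixed-window running mean; a sketch of the interface assumed above."""

    def __init__(self, window):
        self.values = deque(maxlen=window)

    def __iadd__(self, value):
        # `avg_loss += x` pushes a new sample into the window.
        self.values.append(value)
        return self

    def __float__(self):
        # `float(avg_loss)` reads the current mean (0.0 while empty).
        return sum(self.values) / len(self.values) if self.values else 0.0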
Example #2
 def extract(self, fileName):
     with open(fileName, 'r') as f:
         headers = f.readline().split()
         headers = jm.getFileHeader(headers)
         jointsIndices = self.extractor.getJointsIndices(headers)
         frontDirections = []
         centers = []
         for line in f:
             lineInFloats = [float(v) for v in line.split()]
             centers.append(ae.calcJointsAverage(lineInFloats, jointsIndices))
             shouldersVecOnXZPlane = ae.getVecBetweenJoints(
                 headers, lineInFloats, 'ShoulderRight_X', 'ShoulderLeft_X')
             # Rotating the shoulder vector 90 degrees in the XZ plane gives the facing direction.
             frontDirections.append(
                 [shouldersVecOnXZPlane[2], 0, -shouldersVecOnXZPlane[0]])
     # zip() returns an iterator in Python 3, so materialize the transposed results as lists.
     frontDirections = list(zip(*ma.partsmovingAverage(list(zip(*frontDirections)), 50, 1)))
     movingDirections = np.diff(centers, axis=0)
     movingDirections = list(zip(*ma.partsmovingAverage(list(zip(*movingDirections)), 50, 1)))
     advancements = []
     for front, move in zip(frontDirections[:-1], movingDirections):
         # Snap the front vector onto the X axis when it is nearly parallel to it.
         if np.abs(front[0]) / np.abs(front[2]) > 2:
             front = [ae.length(front), 0, 0]
         advancements.append(np.dot(front, move))
     return advancements
Example #3
    def __init__(
        self, num_of_actions, epsilon=0.0001, num_of_neighbours=10, cluster_distance=0.008,
        pseudo_counts=0.001, maximum_similarity=8, episodic_memory_capacity=30000):
        self.epsilon = epsilon
        self.num_of_neighbours = num_of_neighbours
        self.cluster_distance = cluster_distance
        self.pseudo_counts = pseudo_counts
        self.maximum_similarity = maximum_similarity

        self.episodic_memory = deque([], maxlen=episodic_memory_capacity)
        self.moving_average = MovingAverage()
        self.network = Embedding(num_of_actions)
        self.optimizer = tf.keras.optimizers.Adam()
def analyzeDataInner(time, data, label):
    vec = []
    featuresNames = []
    v, f = getStats(data, label)
    vec += v
    featuresNames += f

    _, un = inter.getUniformSampled(time, data)
    cleaned = ma.movingAverage(un, 20, 1.1)
    v, f = getStats(cleaned, label + " after LPF")
    vec += v
    featuresNames += f

    velocity = np.diff(cleaned)
    v, f = getStats(velocity, label + " velocity")
    vec += v
    featuresNames += f

    acceleration = np.diff(velocity)
    v, f = getStats(acceleration, label + " acceleration")
    vec += v
    featuresNames += f

    jerk = np.diff(acceleration)
    v, f = getStats(jerk, label + " jerk")
    vec += v
    featuresNames += f

    return vec, featuresNames
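analyzeDataInner relies on a getStats helper that is not shown; its call sites imply it returns a list of feature values paired with matching names. A hypothetical sketch under that assumption (the choice of mean/std/min/max is illustrative, not confirmed by the source):

import numpy as np

def getStats(data, label):
    # Hypothetical helper: summary statistics plus human-readable feature names.
    data = np.asarray(data, dtype=float)
    values = [np.mean(data), np.std(data), np.min(data), np.max(data)]
    names = [label + " mean", label + " std", label + " min", label + " max"]
    return values, names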
def cleanFracs(fracs, plot=False, MAwindowSize=8, MAexp=1.4):
    cleanedParts = []
    originalParts = []
    for time, values in fracs:
        time, values = inter.getUniformSampled(time, values)
        originalParts.append(values)
        cleanedValues = ma.movingAverage(values, MAwindowSize, MAexp)
        if plot:
            plt.figure()
            plt.plot(values)
            plt.plot(cleanedValues)
            plt.show()
        cleanedParts.append(cleanedValues)
    return cleanedParts, originalParts
Example #6
 def plotResults(self, vec, positive, negative):
     plt.figure()
     filtered = ma.movingAverage(vec, 50, 1.0)
     filtered = pu.normalizeVector(filtered)
     plt.plot(filtered)
     change = [filtered[i + 1] - filtered[i] for i in range(len(filtered) - 1)]
     change = ma.movingAverage(change, 75, 1.0)
     change = pu.normalizeVector(change)
     plt.plot(change)
     clustersByPercentiles = spike.clusterByPercemtile(change, 700, 80)
     plt.plot(clustersByPercentiles)
     clustersByPercentiles = pu.smoothOutliers(clustersByPercentiles)
     plt.plot(clustersByPercentiles)
     ranges = self.prepareRanges(clustersByPercentiles)
     self.printConfedence(filtered, ranges, positive, negative)
     plt.show()
Example #7
class LifeLong():
    def __init__(self):
        self.random_network = RND()
        self.predict_network = RND()
        self.moving_average = MovingAverage()
        # train() below applies gradients through this optimizer.
        self.optimizer = tf.keras.optimizers.Adam()

        self.random_network.trainable = False

    def reset(self):
        self.moving_average.reset()

    def get_modulator(self, observations):
        random = self.random_network(observations)
        predict = self.predict_network(observations)

        error = tf.math.square(predict - random)
        mean, stddev = self.moving_average(error)

        modulator = 1 + (error - mean) / stddev
        return modulator
    
    def train(self, batch_transitions: List[Transition]):
        observations = []
        next_observations = []

        for transition in batch_transitions:
            # Take a trailing slice (not a single element) so the zip below walks a sequence.
            _observations = transition.observations[-6:]
            
            for _observation, _next_observation in zip(_observations[:-1], _observations[1:]):
                observations.append(_observation)
                next_observations.append(_next_observation)
                
        observations = tf.convert_to_tensor(observations)
        next_observations = tf.convert_to_tensor(next_observations)
        with tf.GradientTape() as tape:
            with tape.stop_recording():
                true = self.random_network(observations)
            predict = self.predict_network(observations)
            loss = tf.keras.losses.mean_squared_error(true, predict)
        grads = tape.gradient(loss, self.predict_network.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.predict_network.trainable_variables))
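LifeLong calls its MovingAverage with an error tensor and expects back a (mean, stddev) pair, i.e. a running estimate of the prediction-error distribution. The real class is not shown; a minimal sketch assuming Welford-style streaming updates over the batch-mean error:

import tensorflow as tf

class MovingAverage:
    """Streaming mean/stddev tracker; a sketch of the interface assumed above."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.count = 0
        self.mean = 0.0
        self.m2 = 0.0  # running sum of squared deviations (Welford's algorithm)

    def __call__(self, value):
        sample = float(tf.reduce_mean(value))
        self.count += 1
        delta = sample - self.mean
        self.mean += delta / self.count
        self.m2 += delta * (sample - self.mean)
        stddev = (self.m2 / self.count) ** 0.5 if self.count > 1 else 1.0
        return self.mean, stddev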
Example #8
 def LPF(self):
     self.analysisVec = ma.movingAverage(self.analysisVec, 30, 1.1)
     return self.analysisVec
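Several snippets call ma.movingAverage(values, windowSize, exp) with a window size and an exponent. The implementation is not included in the source; one plausible reading, assuming the exponent controls how sharply the window weights decay away from the centre (exp = 1.0 would give a plain uniform window), is sketched below. Treat the weighting scheme as a guess:

import numpy as np

def movingAverage(values, windowSize, exp):
    # Hypothetical smoothing filter: weights fall off as exp**(-distance from the
    # window centre), normalized to sum to 1, applied as a same-length convolution.
    offsets = np.abs(np.arange(windowSize) - (windowSize - 1) / 2.0)
    weights = np.power(float(exp), -offsets)
    weights /= weights.sum()
    return np.convolve(values, weights, mode='same')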
def main():
    USE_PZH = True

    # initialize
    interface001, interface002, pzhdata = ship_initialize(True, True, True)

    t = PeriodTimer(0.2)

    diff_x_average_gps = MovingAverage(100)
    diff_y_average_gps = MovingAverage(100)

    diff_x_average_lidar = MovingAverage(100)
    diff_y_average_lidar = MovingAverage(100)

    # t.start()

    cnt = 0
    end = 200

    try:
        while True:
            with t:
                self_state = interface001.receive('gps.posx', 'gps.posy',
                                                  'ahrs.yaw', 'ahrs.yaw_speed',
                                                  'gps.hspeed', 'gps.stdx',
                                                  'gps.stdy', 'gps.track')

                target_state = interface002.receive('gps.posx', 'gps.posy',
                                                    'ahrs.yaw',
                                                    'ahrs.yaw_speed',
                                                    'gps.hspeed', 'gps.stdx',
                                                    'gps.stdy', 'gps.track')

                assert pzhdata is not None
                lidar_data = pzhdata.receive()

                if lidar_data["terminated"]:
                    print(
                        "Peng Zhenghao's program is terminated. For safety we close this program."
                    )
                    break

                target = lidar_data["target"]
                if not target:
                    print("No Target Specified!")
                    continue
                else:
                    cnt += 1
                    # print("Current CNT")
                    goal = lidar_data[target]  # goal = [x, y]
                    diff_x = target_state[POS_X] - self_state[POS_X]
                    diff_y = target_state[POS_Y] - self_state[POS_Y]
                    diff_x_average_gps.update(diff_x)
                    diff_y_average_gps.update(diff_y)
                    diff_x_average_lidar.update(goal[0])
                    diff_y_average_lidar.update(goal[1])

                    phi2 = -atan2(diff_y_average_gps.avg,
                                  diff_x_average_gps.avg) - pi / 2
                    phi1 = atan2(diff_y_average_lidar.avg,
                                 diff_x_average_lidar.avg)
                    out = phi1 + phi2 - pi / 2

                    # offset = atan2(diff_y_average_lidar.avg, diff_x_average_lidar.avg) - \
                    #          atan2(diff_y_average_gps.avg, diff_x_average_gps.avg)

                    print("[CNT {}] Current GPS ({}, {}), LiDAR ({}, {}). \
                    ph1{}, ph2 {}, out {} ({} deg).".format(
                        cnt, diff_x_average_gps.avg, diff_y_average_gps.avg,
                        diff_x_average_lidar.avg, diff_y_average_lidar.avg,
                        phi1, phi2, out, out * 180 / pi))
                    if cnt >= end:
                        break

    finally:
        import pickle
        import time

        def get_formatted_time(timestamp=None):
            if not timestamp:
                return time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime())
            else:
                return time.strftime('%Y-%m-%d_%H-%M-%S',
                                     time.localtime(timestamp))

        if diff_y_average_lidar.avg is not None:
            # Offset between the LiDAR and GPS bearing estimates.
            out = atan2(diff_y_average_lidar.avg, diff_x_average_lidar.avg) - \
                  atan2(diff_y_average_gps.avg, diff_x_average_gps.avg)
            pickle.dump(
                {
                    "offset": out,
                    "timestamp": time.time(),
                    "time": get_formatted_time()
                }, open("offset.pkl", "wb"))
            print("Data have saved to offset.pkl")
        else:
            print("Data is not received.")

        time.sleep(0.5)
        interface001.dev.close()
        interface002.dev.close()
        pzhdata.dev.close()
        print('dev closed')
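Most of the remaining examples construct MovingAverage(n) and use an update(x) method together with an avg attribute, which the cleanup code above checks against None before the first sample arrives. A minimal sketch of that variant, assuming a fixed-size window:

from collections import deque

class MovingAverage:
    """Fixed-window mean exposed via .update() / .avg; a sketch of the assumed interface."""

    def __init__(self, window):
        self.values = deque(maxlen=window)
        self.avg = None  # stays None until the first update, as checked above

    def update(self, value):
        self.values.append(value)
        self.avg = sum(self.values) / len(self.values)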
Example #10
def main():
    Parser = argparse.ArgumentParser()
    Parser.add_argument('--method',
                        default="delTri",
                        help='type of Faceswapper')
    Parser.add_argument('--DataPath',
                        default="../Data/TestSet/",
                        help='base path where data files exist')
    Parser.add_argument('--VideoFileName',
                        default="Test1.mp4",
                        help='Video Name')
    Parser.add_argument('--RefImageFileName',
                        default='NONE',
                        help=' Reference Image')
    Parser.add_argument('--SavePath',
                        default="../Results/",
                        help='Folder to save results')
    Parser.add_argument(
        '--PredictorPath',
        default='./phase1/Misc/shape_predictor_68_face_landmarks.dat',
        help='dlib shape predictor path')
    Parser.add_argument('--use_filter',
                        default=False,
                        type=lambda x: bool(int(x)),
                        help='use filter or not')

    Args = Parser.parse_args()
    DataPath = Args.DataPath
    RefImageFileName = Args.RefImageFileName
    SavePath = Args.SavePath
    method = Args.method
    VideoFileName = Args.VideoFileName
    path_to_shape_predictor = Args.PredictorPath
    use_filter = Args.use_filter

    RefImageFilePath = DataPath + RefImageFileName
    VideoFilePath = DataPath + VideoFileName
    # Assumed intent: save the output video under SavePath, named after the input video.
    SaveFileName = SavePath + VideoFileName

    if not os.path.isdir(SavePath):
        print(SavePath, "was not present, creating the folder...")
        os.makedirs(SavePath)

    #######################################
    ##### Setting up reference image ######
    ########################################
    print('Setting up reference image.......')
    FaceRef = cv2.imread(RefImageFilePath)  ## color image

    ########## Choose Mode ############
    #######################################
    if FaceRef is None:
        mode = 2
    else:
        mode = 1
    print("we are in mode ", mode, " using ", method)

    ##### Setting up video ######
    ##############################
    cap = cv2.VideoCapture(VideoFilePath)
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    result = cv2.VideoWriter(SaveFileName, cv2.VideoWriter_fourcc(*'DIVX'), 10,
                             (frame_width, frame_height))

    ##############################################################################
    ########################### mode 1 ###########################################
    ##############################################################################
    if mode == 1:
        print("Doing Mode 1 with Reference Image")

        ##################################################
        ########### Setup pretrained models ##############
        ##################################################
        if method != 'PRNet':
            detector = dlib.get_frontal_face_detector()
            predictor = dlib.shape_predictor(path_to_shape_predictor)

            grayFaceRef = cv2.cvtColor(FaceRef,
                                       cv2.COLOR_BGR2GRAY)  ## gray image
            N_pyrLayers = 1
            boxRef = detector(grayFaceRef,
                              N_pyrLayers)  ## reference bounding box
            if len(boxRef) != 1:
                print('Cannot find face in reference Image... exiting... :( ',
                      len(boxRef))
                exit()
            else:
                boxRef = boxRef[0]
            ##ref image features
            FaceMarks_ref, BoundingBox_ref, shifted_FaceMarks_ref = FaceDetector(
                FaceRef, boxRef, predictor)
            (x, y, w, h) = BoundingBox_ref
            FaceRef_crop = FaceRef[y:y + h, x:x + w]

        elif method == 'PRNet':
            prn = PRN(is_dlib=True)

        ##################################################
        ################# Run the Loop  ##################
        ##################################################
        facemarksFrames = []
        prev_pos = None
        #for filtering
        old_boxFrame = None
        del_boxFrame = 0
        first_time = True

        moving_average_position = MovingAverage(window_size=4,
                                                weight=[1, 1, 1, 1])
        moving_average_del = MovingAverage(window_size=4, weight=[1, 1, 1, 1])
        FaceMarkersFrame = 0
        while True:
            ret, frame = cap.read()
            if not ret:
                print("Stream ended..")
                break
            frame = adjustGamma(frame, 1.5)
            frame[frame > 255] = 255  # clip any overflow introduced by the gamma adjustment

            # frame = cv2.resize(frame, (ref_w,ref_h))

            ################# DeepLearning Method  ##################
            #########################################################
            if method == 'PRNet':

                pos = prn.process(frame)
                ref_pos = prn.process(FaceRef)

                if pos is None:
                    if prev_pos is not None:
                        pos = prev_pos
                    else:
                        print("## No face Found ##")
                        WarpedFace = frame
        #                 text_location = (frame_width-int(frame_width*0.2), frame_height- frame_height*0.1)
        #                 WarpedFace = cv2.putText(WarpedFace, 'Cannot Find Face',text_location, cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 0) ,3, cv2.LINE_AA, False)
                if pos is not None:
                    #             if len(pos)>1:
                    #                 print('Wrong Mode chosen')
                    #                 exit()
                    #             elif len(pos)==1:
                    WarpedFace = FaceSwap_DL(prn, pos, ref_pos, frame, FaceRef)

            #####################################################################
            ######################## Traditional methods ########################
            else:
                boxFrame = Top1Box(detector(gray(frame), 1))
                print("number of faces found...", len(boxFrame))

                ### Failure Cases ###
                #Check if face is detected
                if len(boxFrame) > 1:
                    # Two faces detected: crop both regions with a margin.
                    # (Top1Box normally returns at most one box, so this is a fallback path.)
                    margin = 50
                    rects = boxFrame
                    boxRef, boxFrame = rects[0], rects[1]
                    FaceFrame = frame[rects[1].top() - margin:rects[1].bottom() + margin,
                                      rects[1].left() - margin:rects[1].right() + margin, :]
                    FaceRef = FaceRef[rects[0].top() - margin:rects[0].bottom() + margin,
                                      rects[0].left() - margin:rects[0].right() + margin, :]

                elif (len(boxFrame) < 1):
                    if not first_time:
                        print("no box detected, using old box")
                        # boxFrame.append(old_boxFrame[0] + del_boxFrame)
                        if del_box_corners is not None:
                            boxFrame = predictBoxes(old_boxFrame,
                                                    del_box_corners)
                        else:
                            boxFrame = old_boxFrame

                    else:
                        print("No face found")
                        WarpedFace = frame
                ########################################

                ### Success Cases ###
                if (len(boxFrame) > 0):
                    # if not first_time:
                    #     del_boxFrame = boxFrame[0] - old_boxFrame[0]
                    #     del_flag = True
                    # del_box = getDelBox(boxFrame, old_boxFrame)
                    del_box_corners = getDelBox(boxFrame, old_boxFrame)
                    old_boxFrame = boxFrame
                    boxFrame = boxFrame[0]

                    if method == 'TPS':

                        WarpedFace, FaceMarkersFrame = FaceSwap1_TPS(
                            FaceRef_crop, shifted_FaceMarks_ref, frame,
                            boxFrame, predictor, use_filter, first_time,
                            FaceMarkersFrame, moving_average_position,
                            moving_average_del)
                        #frame smoothening
                        if use_filter:
                            if not first_time:
                                # WarpedFace, del_frame = smoothenFrames(WarpedFace, past_frame)
                                past_frame = WarpedFace
                            else:
                                past_frame = WarpedFace

                        first_time = False

                    elif method == 'delTri':

                        WarpedFace, FaceMarkersFrame = FaceSwap1_delTri(
                            FaceRef, FaceMarks_ref, frame, boxFrame, predictor,
                            use_filter, first_time, FaceMarkersFrame,
                            moving_average_position, moving_average_del)
                        #frame smoothening
                        if use_filter:
                            if not first_time:
                                WarpedFace, del_frame = smoothenFrames(
                                    WarpedFace, past_frame)
                                past_frame = WarpedFace
                            else:
                                past_frame = WarpedFace

                        first_time = False

                #######################
            cv2.imshow(str(method), WarpedFace)
            result.write(WarpedFace)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    # MODE 2
    else:
        ##################################################
        ########### Setup pretrained models ##############
        ##################################################
        print("Doing Mode 2 Swap within frame")
        print(method)
        if method == 'PRNet':
            prn = PRN_(is_dlib=True)
        else:
            detector = dlib.get_frontal_face_detector()
            predictor = dlib.shape_predictor(path_to_shape_predictor)

        ##################################################
        ################# Setup Video ####################
        ##################################################
        # print("setting up the video")
        # cap = cv2.VideoCapture(VideoFilePath)
        # frame_width = int(cap.get(3))
        # frame_height = int(cap.get(4))
        # result = cv2.VideoWriter(SaveFileName,
        #                         cv2.VideoWriter_fourcc(*'DIVX'),
        #                         10, (frame_width, frame_height))

        facemarksFrames = []
        ##################################################
        ################ Run loop ########################
        ##################################################
        outputs = []
        # State for falling back to the previous frame's detections.
        first_time = True
        old_box = None
        prev_poses = None
        print("Begin swapping")
        while True:
            ret, frame = cap.read()

            if not ret:
                print("Stream ended..")
                break

            if method == 'PRNet':

                poses = prn.process(frame)
                if poses is None or len(poses) < 2:
                    # Fall back to the previous frame's detections, if any.
                    poses = prev_poses
                print("number of faces found...",
                      0 if poses is None else len(poses))
                if poses is not None and len(poses) == 2:
                    prev_poses = poses
                    pose1, pose2 = poses[0], poses[1]
                    WarpedFace_tmp = FaceSwap_DL(prn, pose1, pose2, frame,
                                                 frame)
                    WarpedFace = FaceSwap_DL(prn, pose2, pose1, WarpedFace_tmp,
                                             frame)
                else:
                    WarpedFace = frame
                    text_location = (frame_width - 400, frame_height - 42)
                    WarpedFace = cv2.putText(WarpedFace, 'Cannot Find Face',
                                             text_location,
                                             cv2.FONT_HERSHEY_SIMPLEX, 1.2,
                                             (255, 0, 0), 3, cv2.LINE_AA,
                                             False)

            else:
                boxes = Top2Boxes(detector(gray(frame), 1))
                print("number of found...", len(boxes))

                # if len(boxes)<2:
                #     boxes = old_box
                if boxes is None or len(boxes) < 2:
                    if not first_time:
                        boxes = old_box

                if boxes is not None and len(boxes) == 2:  #review
                    old_box = boxes
                    # prev_boxes = boxes
                    box1, box2 = boxes[0], boxes[1]
                    if method == 'delTri':
                        WarpedFace = FaceSwap2_delTri(frame, boxes[0],
                                                      boxes[1], predictor)
                        first_time = False
                    elif method == 'TPS':
                        WarpedFace = FaceSwap2_TPS(frame, boxes[0], boxes[1],
                                                   predictor)
                        first_time = False
                else:
                    WarpedFace = frame
                    text_location = (frame_width - 400, frame_height - 42)
                    WarpedFace = cv2.putText(WarpedFace, 'Cannot Find Face',
                                             text_location,
                                             cv2.FONT_HERSHEY_SIMPLEX, 1.2,
                                             (255, 0, 0), 3, cv2.LINE_AA,
                                             False)

            cv2.imshow(str(method), WarpedFace)
            result.write(np.uint8(WarpedFace))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    cap.release()
    result.release()
    cv2.destroyAllWindows()
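Mode 1 above constructs MovingAverage(window_size=4, weight=[1, 1, 1, 1]) to smooth facial-landmark positions and their deltas between frames. How the swap routines consume it is not shown; a plausible sketch of a weighted fixed-window average over numpy arrays (the update-returns-smoothed-value behaviour is an assumption):

from collections import deque
import numpy as np

class MovingAverage:
    """Weighted fixed-window average; a sketch of the interface assumed in mode 1."""

    def __init__(self, window_size, weight):
        assert len(weight) == window_size
        self.weight = np.asarray(weight, dtype=float)
        self.values = deque(maxlen=window_size)

    def update(self, value):
        self.values.append(np.asarray(value, dtype=float))
        w = self.weight[-len(self.values):]
        # Weighted mean over however much of the window has filled so far.
        return np.tensordot(w / w.sum(), np.stack(self.values), axes=1)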
Example #11
def main():
    corpus = DATA_PATH[FLAGS.corpus_name]
    model = Model(corpus, **opts)

    n_batches = _get_n_batches(FLAGS.batch_size, model.corpus_size)

    # TODO: rename to pretrain
    g_loss = model.g_tensors_pretrain.loss
    g_train_op = set_train_op(g_loss, model.g_tvars)

    g_loss_valid = model.g_tensors_pretrain_valid.loss

    d_logits_real = model.d_tensors_real.prediction_logits
    d_logits_fake = model.d_tensors_fake.prediction_logits

    gan_d_loss, gan_g_loss = gan_loss(d_logits_real,
                                      d_logits_fake,
                                      gan_type=FLAGS.gan_type)
    gan_d_train_op = set_train_op(gan_d_loss, model.d_tvars)
    gan_g_train_op = set_train_op(gan_g_loss, model.g_tvars)

    g_loss_ma = MovingAverage(10)

    sv = get_supervisor(model)
    sess_config = get_sess_config()

    tf.logging.info(" number of parameters %i", count_number_of_parameters())

    with sv.managed_session(config=sess_config) as sess:
        sess.run(_phase_train)

        start_threads(model.enqueue_data, (sess, ))
        start_threads(model.enqueue_data_valid, (sess, ))

        # TODO: add learning rate decay -> early_stop
        sv.loop(60, print_loss, (sess, g_loss, g_loss_ma))
        sv.loop(600, print_valid_loss, (sess, g_loss_valid))
        sv.loop(
            100, print_sample,
            (sess, FLAGS.seed_text, model.g_tensors_pretrain_valid.flat_logits,
             model.input_ph, model.word2idx, model.idx2word))  # TODO: cleanup

        # make graph read only
        sess.graph.finalize()

        for epoch in range(FLAGS.epoch_size):
            tf.logging.info(" epoch: %i", epoch)

            for _ in tqdm.tqdm(range(n_batches)):
                if sv.should_stop():
                    break

                # TODO: add strategies
                # print(sess.run(model.source_valid))
                # print(sess.run(g_loss_valid))
                sess.run([g_train_op, model.increment_global_step_op
                          ])  # only run generator
                # sess.run([g_train_op, d_train_op, model.global_step])

                if False:
                    # some criterion
                    sv.stop()

        sv.saver.save(sess, sv.save_path, global_step=sv.global_step)
        tf.logging.info(" training finished")
Example #12
def main():
    data_path = '/data/dataset/svhn'

    train_data = dataset.joint_data_loader(data_path, data_path, PP)
    test_data = dataset.joint_data_loader(data_path,
                                          data_path,
                                          is_training=False)

    train_loader = DataLoader(train_data,
                              batch_size=BATCH_SIZE,
                              num_workers=2,
                              shuffle=True,
                              drop_last=True)
    test_loader = DataLoader(test_data,
                             batch_size=4,
                             num_workers=2,
                             shuffle=False)

    net = vgg.LeNet(14)

    net.apply(weights_init)
    cudnn.benchmark = True

    net.train().cuda()

    criterion = nn.CrossEntropyLoss()

    trainable_list = filter(lambda p: p.requires_grad, net.parameters())
    other_base_list = filter(lambda p: p[0].split('.')[-1] != 'bases1',
                             net.named_parameters())
    other_base_list = [x[1] for x in other_base_list if x[1].requires_grad]

    named_base_list = filter(lambda p: p[0].split('.')[-1] != 'bases0',
                             net.named_parameters())
    base_list = [x[1] for x in named_base_list if x[1].requires_grad]

    print('Totally %d bases for domain1' % len(base_list))

    main_list = other_base_list

    # optimizer = torch.optim.SGD(net.parameters(), FLAGS.learning_rate, 0.9, weight_decay=0.0001, nesterov=True)
    if OPTIMIZER == 'momentum':
        optimizer = torch.optim.SGD(net.parameters(),
                                    BASE_LEARNING_RATE / 1,
                                    0.9,
                                    weight_decay=0.0001,
                                    nesterov=True)

    elif OPTIMIZER == 'adam':
        optimizer = torch.optim.Adam(net.parameters(),
                                     BASE_LEARNING_RATE,
                                     weight_decay=0.0001)  ###__0.0001->0.001

    elif OPTIMIZER == 'rmsp':
        optimizer = torch.optim.RMSprop(net.parameters(),
                                        BASE_LEARNING_RATE,
                                        weight_decay=0.0001)

    count = 1
    epoch = 0

    cudnn.benchmark = True

    MOVING_SCALE = 100

    loss_ma0 = MovingAverage(MOVING_SCALE)
    loss_ma1 = MovingAverage(MOVING_SCALE)

    loss_dis_ma = MovingAverage(MOVING_SCALE)
    loss_style_ma = MovingAverage(MOVING_SCALE)
    loss_main_ma = MovingAverage(MOVING_SCALE)
    loss_sim_ma = MovingAverage(MOVING_SCALE)
    #loss_l1 = MovingAverage(100)
    acc_ma_0 = MovingAverage(MOVING_SCALE)
    acc_ma_1 = MovingAverage(MOVING_SCALE)

    test_loss = AverageMeter()
    test_acc = AverageMeter()
    while True:
        if epoch >= MAX_EPOCH: break
        epoch += 1
        log_string('********Epoch %d********' % (epoch))
        for i, data in enumerate(train_loader):
            img0, img1, gt0, gt1 = data
            # img1, img0, gt1, gt0 = data
            # pdb.set_trace()
            count += 1

            imgs_in_0 = Variable(img0).float().cuda()
            gt_in_0 = Variable(gt0).long().cuda()
            imgs_in_1 = Variable(img1).float().cuda()
            gt_in_1 = Variable(gt1).long().cuda()

            pred = net(torch.cat([imgs_in_0, imgs_in_1], 0))
            # pdb.set_trace()
            pred0 = pred[:BATCH_SIZE]
            pred1 = pred[BATCH_SIZE:]

            f0 = net.feature[:BATCH_SIZE]
            f1 = net.feature[BATCH_SIZE:]

            loss0 = criterion(pred0, gt_in_0)
            loss1 = criterion(pred1, gt_in_1)

            loss_all = loss0 + loss1

            acc_this_0 = accuracy(pred0, gt_in_0)
            acc_ma_0.update(acc_this_0[0].item())

            acc_this_1 = accuracy(pred1, gt_in_1)
            acc_ma_1.update(acc_this_1[0].item())

            optimizer.zero_grad()
            loss_all.backward()
            optimizer.step()
            optimizer.zero_grad()

            loss_ma0.update(loss0.item())
            loss_ma1.update(loss1.item())

            if count % FLAGS.print_inter == 0:
                log_string(
                    '[Current iter %d, accuracy0 is %3.2f, accuracy1 is %3.2f, '
                    'loss0 is %2.6f, loss1 is %2.6f, lr: %f]' %
                    (count, acc_ma_0.avg, acc_ma_1.avg, loss_ma0.avg,
                     loss_ma1.avg, optimizer.param_groups[0]['lr']))
            if count % 500 == 0:
                validation(test_loader, net, criterion, count, epoch,
                           test_loss, test_acc)
            if count % 1000 == 0:
                torch.save(net.state_dict(),
                           './' + LOG_DIR + '/' + 'model.pth')
            if count % DECAY_STEP == 0 and optimizer.param_groups[0]['lr'] >= (
                    BASE_LEARNING_RATE / 10.0):
                # Decay the learning rate of the single optimizer defined in this script.
                optimizer.param_groups[0]['lr'] = optimizer.param_groups[0]['lr'] / 10.0

    log_string('Training reaches maximum epoch.')
Example #13
def main():
    train_data = dataset.AFLW('/data/dataset/face/aflw/AFLWinfo_release.mat',
                              '/data/dataset/face/aflw/data/flickr/',
                              256,
                              is_training=True)
    test_data = dataset.AFLW('/data/dataset/face/aflw/AFLWinfo_release.mat',
                             '/data/dataset/face/aflw/data/flickr/',
                             256,
                             is_training=False)

    train_loader = DataLoader(train_data,
                              batch_size=BATCH_SIZE,
                              num_workers=4,
                              shuffle=True,
                              drop_last=True)
    test_loader = DataLoader(test_data,
                             batch_size=1,
                             num_workers=4,
                             shuffle=False)

    # net = model.DenseNet()
    # net = model.DenseNet_nz()
    # net = model1.vgg_19()
    net = resnet50(pretrained=True)
    # Note: applying weight_init to every module re-initializes the pretrained weights.
    net.apply(weight_init)

    net.train().cuda()

    loss_func = nn.CrossEntropyLoss().cuda()

    criterion = l1_loss(False).cuda()

    if OPTIMIZER == 'momentum':
        optimizer = torch.optim.SGD(net.parameters(),
                                    BASE_LEARNING_RATE,
                                    0.9,
                                    weight_decay=0.0001,
                                    nesterov=True)
    elif OPTIMIZER == 'adam':
        optimizer = torch.optim.Adam(net.parameters(),
                                     BASE_LEARNING_RATE,
                                     weight_decay=0.0001)  ###__0.0001->0.001
    elif OPTIMIZER == 'rmsp':
        optimizer = torch.optim.RMSprop(net.parameters(),
                                        BASE_LEARNING_RATE,
                                        weight_decay=0.0001)
    # optimizer = torch.optim.SGD(net.parameters(), learning_rate, 0.9, weight_decay=0.0001, nesterov=True)

    count = 1
    epoch = 0

    loss_ma = MovingAverage(100)

    test_loss = AverageMeter()

    while True:
        epoch += 1
        log_string('********Epoch %d********' % (epoch))
        for i, data in enumerate(train_loader):
            imgs, gt = data
            count += 1

            imgs_in = Variable(imgs).float().cuda()
            gt_in = Variable(gt).float().cuda()

            pred = net(imgs_in)

            loss = criterion(pred, gt_in)
            # loss = loss0 + loss1

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            loss_ma.update(loss.item())

            if count % 100 == 0:
                log_string(
                    '[Current iter %d, l1 loss is %2.6f, lr: %f]' %
                    (count, loss_ma.avg, optimizer.param_groups[0]['lr']))
                out_im = imgs[0].numpy()
                out_gt = gt[0].numpy()
                out_pred = pred[0].cpu().data.numpy().squeeze()

                dataset.draw_im(out_im,
                                out_gt,
                                out_pred,
                                count,
                                0,
                                LOG_DIR,
                                is_training=True)
            if count % 1000 == 0:
                validation(test_loader, net, criterion, count, epoch,
                           test_loss)
            if count % 1000 == 0:
                torch.save(net, './' + LOG_DIR + '/' + 'model.pth')
            if count % 15000 == 0 and optimizer.param_groups[0]['lr'] >= (
                    BASE_LEARNING_RATE / 100.0):
                optimizer.param_groups[0][
                    'lr'] = optimizer.param_groups[0]['lr'] / 10.0
Example #15
start_epoch = 0  # start from epoch 0 or last epoch
if args.resume:
    log_string('==> Resuming from checkpoint..')
    checkpoint = torch.load(args.checkpoint)
    net.load_state_dict(checkpoint['net'])
    best_loss = checkpoint['loss']
    start_epoch = checkpoint['epoch']

criterion = SSDLoss(num_classes=4)
optimizer = optim.SGD(net.parameters(),
                      lr=args.lr,
                      momentum=0.9,
                      weight_decay=1e-4)

# training monitors
loss_ma = MovingAverage(100)
loss_loc_ma = MovingAverage(100)
loss_cls_ma = MovingAverage(100)
test_loss = AverageMeter()
test_loc_loss = AverageMeter()
test_cls_loss = AverageMeter()


# Training
def train(epoch):
    log_string('\nEpoch: %d' % epoch)
    net.train()
    # Decay the learning rate every 80 epochs. The snippet is truncated at this point;
    # the body below follows the decay pattern used in the other training scripts above.
    if (epoch + 1) % 80 == 0:
        optimizer.param_groups[0]['lr'] = optimizer.param_groups[0]['lr'] / 10.0
Example #16
import utils.utils as ut
import utils.MovingAverage as ma

file = 'AMCs/598.amc'
joint = 'rtibia'
signal = getAMCperiod(joint, file)
stride = signal[112:251]
signal = ut.alignByMax(stride)
noiseStdFactor = 0.04
amplitude = np.max(signal) - np.min(signal)
var = (amplitude * noiseStdFactor) ** 2
print(var)
partsAmount = 16
noisy_parts = createParts(signal, True, partsAmount, var)
parts = ma.partsmovingAverage(noisy_parts)

frameSize = math.ceil(np.sqrt(len(parts)))
fig = plt.figure()
for i, part in enumerate(parts):
    curr = fig.add_subplot(frameSize, frameSize, i + 1)
    curr.plot(part)

merged, mergedDes = loop.stitch(parts)
print(len(merged))
bestFeatures = merged[-1]
des = mergedDes[-1]
outOff, listOff = ut.alignByBig(bestFeatures, signal)
plt.figure()
plt.plot(range(listOff, listOff + len(signal)), signal, color='b')
plt.plot(range(outOff, outOff + len(bestFeatures)), bestFeatures, color='r')
def evalParams(partsAmount):
    noiseStdFactor = 0.02
    stances = {}
    swings = {}
    for subject in subjects:
        for index in range(8):
            try:
                signal = getAMCInput(joint, subject, index)
            except Exception:
                continue
            amplitude = np.max(signal) - np.min(signal)
            var = (amplitude * noiseStdFactor) ** 2
            for stitch in range(stitchesNum):
                try:
                    noisy_parts = st.createParts(signal, False, partsAmount, var)
                except Exception:
                    continue
                parts = ma.partsmovingAverage(noisy_parts)
                stitched_parts, des = loop.stitch(parts)
                if len(stitched_parts) == len(parts):
                    continue
                stitched = stitched_parts[-1]
                # Feed the stitched signal back in for the next stitching round.
                signal = stitched
                tmpSwings, tmpStances = getSwingsAndStances(signal)
                key = (stitch, subject)
                for stanceLen, swingLen in zip(tmpStances, tmpSwings):
                    if stanceLen > 55:
                        stances[key] = stances.get(key, []) + [stanceLen]
                    if swingLen > 35:
                        swings[key] = swings.get(key, []) + [swingLen]
    stancesCV = []
    swingsCV = []
    strideCV = []
    strideLengths = {}
    stancesLengths = {}
    swingsLengths = {}
    for stitch in range(stitchesNum):
        for subject in subjects:
            key = (stitch, subject)
            stance = None
            if not(key in stances and key in swings):
                continue
            stance = stances[key]
            cv = getCV(stance)
            if not math.isnan(cv) and cv > 0:
                stancesCV.append(cv)
            stancesLengths[subject] = stancesLengths.get(subject, []) + stance
            
            swing = swings[key]
            cv = getCV(swing)
            if not math.isnan(cv) and cv > 0:
                swingsCV.append(cv)
            swingsLengths[subject] = swingsLengths.get(subject, []) + swing
            
            strideLength = [x+y for x,y in zip(stance, swing)]
            cv = getCV(strideLength)
            if not math.isnan(cv) and cv > 0:
                strideCV.append(cv)
            strideLengths[subject] = strideLengths.get(subject, []) + strideLength
    
    stancesLengths = [stancesLengths[subject] for subject in stancesLengths]
    swingsLengths = [swingsLengths[subject] for subject in swingsLengths]
    strideLengths = [strideLengths[subject] for subject in strideLengths]
    strideReference = [115.61538461538461, 113.66666666666667, 116.11111111111111, 126.375]
    strideMeans = [np.mean(seq) for seq in strideLengths]
    finalGrade = np.mean([np.abs(y-x)/x for x,y in zip(strideReference, strideMeans)])   
    stanceCVmean = np.mean(stancesCV)
    swingCVmean = np.mean(swingsCV)
    strideCVsMean = np.mean(strideCV)
    titles = ['Subject: ' + str(x) for x in subjects]
    #plotParts(stancesLengths, 'Stance number', 'Stance time(in frames)', titles)
    #plotParts(swingsLengths, 'Swing number', 'Swing time(in frames)', titles)
    #plotParts(strideLengths, 'Stride number', 'Stride time(in frames)', titles)
    print('partsAmount', partsAmount)
    print(stanceCVmean, [np.mean(seq) for seq in stancesLengths])
    print(swingCVmean, [np.mean(seq) for seq in swingsLengths])
    print(strideCVsMean, strideMeans)
    return finalGrade
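evalParams scores each length sequence with getCV, which by its name and usage computes the coefficient of variation. A minimal sketch under that assumption:

import numpy as np

def getCV(values):
    # Hypothetical helper: coefficient of variation (std / mean) of a sequence.
    values = np.asarray(values, dtype=float)
    mean = values.mean()
    return float('nan') if mean == 0 else values.std() / mean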
Example #18
# (Reconstructed opening: the snippet begins mid-computation of the pairwise
# distance matrix between mined parts.)
mat = []
for p1 in minedParts:
    row = []
    for p2 in minedParts:
        dis = getDistanceBetweenFracs(p1, p2, lenOfCycle)
        row.append(dis)
    mat.append(row)
print(mat)
path = solve_tsp(mat)
print(path)
#whole = qu.orderWithCost(minedParts, getDistanceBetweenFracs, appendFracs)
whole = minedParts[path[0]]
# `path` already lists part indices in visit order.
for i in path[1:]:
    whole = appendFracs(whole, minedParts[i], lenOfCycle)
whole = whole[0]
    
plt.figure()
whole = ma.movingAverage(whole, 10, 1.3)
plt.plot(whole)
#plt.plot()
#plt.show()
periods = []
strides = []
stances = []
swings = []
maximaOrder = 15
clusteringGranularity = 0.5
breaked = prt.breakToPeriods(whole, maximaOrder, clusteringGranularity)
for cycle in breaked:
    if 25 < len(cycle) < 60:
        #periods.append(cycle)
        strides.append(cycle)
        minIdx = np.argmin(cycle)
Example #19
class Episodic():
    def __init__(
        self, num_of_actions, epsilon=0.0001, num_of_neighbours=10, cluster_distance=0.008,
        pseudo_counts=0.001, maximum_similarity=8, episodic_memory_capacity=30000):
        self.epsilon = epsilon
        self.num_of_neighbours = num_of_neighbours
        self.cluster_distance = cluster_distance
        self.pseudo_counts = pseudo_counts
        self.maximum_similarity = maximum_similarity

        self.episodic_memory = deque([], maxlen=episodic_memory_capacity)
        self.moving_average = MovingAverage()
        self.network = Embedding(num_of_actions)
        self.optimizer = tf.keras.optimizers.Adam()


    def reset(self):
        self.episodic_memory.clear()
        self.moving_average.reset()


    def get_similarity(self, observations: Observation, next_observations: Observation) -> Tuple[List[tf.float32], float]:
        likelihood, controllable_state = self.network(observations, next_observations)

        dists = [tf.norm(controllable_state - state) for state in self.episodic_memory]
        # tf.nn.top_k selects the largest values and returns (values, indices); negate the
        # distances to pick the k nearest neighbours instead.
        dists = -tf.nn.top_k(-tf.stack(dists), self.num_of_neighbours).values
        squared_dists = tf.math.square(dists)

        for squared_dist in squared_dists:
            self.moving_average(squared_dist)
        
        normalized_dists = squared_dists / self.moving_average.prev_mean
        normalized_dists = tf.math.maximum(normalized_dists - self.cluster_distance, 0.0)
        
        kernels = self.epsilon / (normalized_dists + self.epsilon)

        similarity = tf.math.sqrt(tf.math.reduce_sum(kernels)) + self.pseudo_counts

        self.episodic_memory.append(controllable_state)
        
        if similarity > self.maximum_similarity:
            return likelihood, 0.0
        else:
            return likelihood, 1 / similarity
        
    def train(self, batch_transitions: List[Transition]):
        observations = []
        next_observations = []
        actions = []
        for transition in batch_transitions:
            # Take trailing slices (not single elements) so the zip below walks sequences.
            _observations = transition.observations[-6:]
            _actions = transition.actions[-5:]
            
            for _observation, _next_observation, _action in zip(_observations[:-1], _observations[1:], _actions):
                observations.append(_observation)
                next_observations.append(_next_observation)
                actions.append(_action)
                
        observations = tf.convert_to_tensor(observations)
        next_observations = tf.convert_to_tensor(next_observations)
        actions = tf.convert_to_tensor(actions)
        with tf.GradientTape() as tape:
            likelihood, _ = self.network(observations, next_observations)
            loss = tf.keras.losses.mean_squared_error(actions, likelihood)
        grads = tape.gradient(loss, self.network.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.network.trainable_variables))
Example #20
def main():
    corpus = DATA_PATH[FLAGS.corpus_name]
    model = Model(corpus, **opts)

    n_batches = _get_n_batches(FLAGS.batch_size, model.corpus_size)

    # TODO: rename to pretrain
    g_loss = model.g_tensors_pretrain.loss
    g_train_op = set_train_op(g_loss, model.g_tvars)

    g_loss_valid = model.g_tensors_pretrain_valid.loss

    d_logits_real = model.d_tensors_real.prediction_logits
    d_logits_fake = model.d_tensors_fake.prediction_logits

    gan_d_loss, gan_g_loss = gan_loss(d_logits_real,
                                      d_logits_fake,
                                      gan_type=FLAGS.gan_type)

    gan_d_train_op = set_train_op(gan_d_loss, model.d_tvars)
    gan_g_train_op = set_train_op(gan_g_loss, model.g_tvars)

    g_loss_ma = MovingAverage(10)

    sv = get_supervisor(model)
    sess_config = get_sess_config()

    tf.logging.info(" number of parameters %i", count_number_of_parameters())

    with sv.managed_session(config=sess_config) as sess:
        sess.run(_phase_train)

        start_threads(model.enqueue_data, (sess, ))
        start_threads(model.enqueue_data_valid, (sess, ))

        # TODO: add learning rate decay -> early_stop
        if FLAGS.gan_strategy == "pretrain":
            sv.loop(60, print_loss, (sess, g_loss, g_loss_ma))
            sv.loop(600, print_valid_loss, (sess, g_loss_valid))
            sv.loop(
                100, print_sample,
                (sess, FLAGS.seed_text,
                 model.g_tensors_pretrain_valid.flat_logits, model.input_ph,
                 model.word2idx, model.idx2word))  # TODO: cleanup
        elif FLAGS.gan_strategy in [
                "generator", "simultaneous", "alternating"
        ]:
            # sv.loop(60, print_loss, (sess, g_loss, g_loss_ma))
            # sv.loop(600, print_valid_loss, (sess, g_loss_valid))
            sv.loop(
                100, print_sample,
                (sess, FLAGS.seed_text, model.g_tensors_fake_valid.flat_logits,
                 model.input_ph, model.word2idx, model.idx2word))

        # make graph read only
        sess.graph.finalize()

        for epoch in range(FLAGS.epoch_size):
            tf.logging.info(" epoch: %i", epoch)

            for _ in tqdm.tqdm(range(n_batches)):
                if sv.should_stop():
                    break

                if FLAGS.gan_strategy == "pretrain":
                    sess.run([g_train_op, model.increment_global_step_op])
                elif FLAGS.gan_strategy == "generator":
                    sess.run([gan_g_train_op, model.increment_global_step_op])
                elif FLAGS.gan_strategy == "discriminator":
                    sess.run([gan_d_train_op, model.increment_global_step_op])
                elif FLAGS.gan_strategy == "simultaneous":
                    sess.run([
                        gan_g_train_op, gan_d_train_op,
                        model.increment_global_step_op
                    ])
                elif FLAGS.gan_strategy == "alternating":
                    assert 0. < FLAGS.gan_gd_ratio < 1.0
                    u = random.random()
                    if FLAGS.gan_gd_ratio < u:
                        sess.run(
                            [gan_g_train_op, model.increment_global_step_op])
                    elif FLAGS.gan_gd_ratio > u:
                        sess.run(
                            [gan_d_train_op, model.increment_global_step_op])
                else:
                    raise ValueError("Wrong gan_strategy.")

                if False:
                    # some criterion
                    sv.stop()

        sv.saver.save(sess, sv.save_path, global_step=sv.global_step)
        tf.logging.info(" training finished")