def find_matches(subscriptions, reddit, database):
    """Collect (subscription, submission) pairs that match and are not yet stored.

    Arguments:
    - subscriptions: iterable of Subscription objects.
    - reddit: client exposing get_submissions(subreddit_name).
    - database: store exposing check_if_match_exists(username, sub_str, permalink).

    Returns: list of (subscription, submission) tuples for new matches.
    """
    Logger.log('Finding Matches...', Color.GREEN)
    # Cache of lowercased subreddit name -> fetched submissions, so each
    # subreddit is fetched from reddit at most once across all subscriptions.
    subreddits = {}
    matches = []
    for subscription in subscriptions:
        for subreddit in subscription.data[Subscription.SUBREDDITS]:
            key = subreddit.lower()
            # Keys are always stored lowercased, so a direct dict membership
            # test replaces the original O(n) rebuild-and-scan of lowercased
            # keys on every lookup.
            if key not in subreddits:
                Logger.log(key, Color.CYAN)
                subreddits[key] = list(reddit.get_submissions(key))
            for submission in subreddits[key]:
                is_match, mismatched_keys = MatchFinder.is_match(subscription, submission)
                if is_match:
                    already_exists = database.check_if_match_exists(
                        subscription.username,
                        subscription.to_string(),
                        submission.permalink)
                    if not already_exists:
                        matches.append((subscription, submission))
    return matches
Exemplo n.º 2
0
   def _validate_building_data(self, b_dict):
      """
      Validate (and possibly normalize) the identifiers of a building dict.

      When "b_id" is missing or invalid but a valid legacy id ("l_b_id")
      exists, the legacy id is promoted to "b_id" so the building is not
      discarded.

      Arguments:
      - b_dict: a dictionary representing a building

      Return value: True if data is valid, False otherwise
      """
      building_id = b_dict.get("b_id", "")
      legacy_id   = b_dict.get("l_b_id", "")

      # Fast path: the primary id is already valid.
      if Building.is_valid_bid(building_id):
         return True

      # No usable id at all: report and reject.
      if not Building.is_valid_bid(legacy_id):
         Logger.error(
            "Building discarded:",
            "Invalid building id", building_id,
            "and no valid legacy id is present"
            )
         return False

      # Fall back to the legacy id.
      Logger.warning(
         "Invalid building id: \"{}\"".format(building_id),
         "- legacy id", legacy_id, "will be used instead."
         )
      b_dict["b_id"] = legacy_id
      return True
Exemplo n.º 3
0
   def prepare_rooms(self, floor_id, rooms):
      """
      Transform a list of rooms into a dictionary indexed by room id.

      Arguments:
      - floor_id: a string representing the floor identifier,
      - rooms: a list of rooms.

      Returns: a dictionary of validated rooms, keyed by their r_id.

      Each room is passed through self.sanitize_room; rooms whose r_id fails
      Building.is_valid_rid are discarded (and logged). When a room carries a
      "cat_id" key, the id is resolved from its "cat_name".
      """
      result = {}
      discarded_rooms = set()

      for r in map(self.sanitize_room, rooms):
         if not Building.is_valid_rid(r["r_id"]):
            discarded_rooms.add(r["r_id"])
            continue

         if "cat_id" in r:
            r["cat_id"] = RoomCategory.get_cat_id_by_name(r.get("cat_name", ""))
            # pop() instead of del: "cat_name" may legitimately be absent
            # (the r.get() above already tolerates that), and del would
            # raise KeyError in that case.
            r.pop("cat_name", None)
         r_id = r.pop("r_id")
         result[r_id] = r

      if discarded_rooms:
         Logger.warning(
            "Rooms discarded from floor", floor_id,
            "for having an invalid room id:",
            ", ".join(discarded_rooms)
            )
      return result
Exemplo n.º 4
0
    def build_element(link_type, label):
        """Build an <a> element linking to *label*; '' when the target is unknown.

        *label* may use the form "name:display text" to override the link text.
        """
        # Split optional explicit link text from the target name.
        link_text = None
        sep = label.find(':')
        if sep != -1:
            link_text = label[sep + 1:]
            label = label[:sep]

        result = Globals.get_url_by_name(label, link_type)
        item, item_field = result[0], result[1]

        a = etree.Element('a')
        a.set('data-href', 'Alink')

        if item_field:
            a.text = link_text or item_field.href_name
            a.set('href', '#{item_field.href_id}'.format(item_field=item_field))
        elif item:
            a.text = link_text or item.href_name
            a.set('href', '#{item.href_id}'.format(item=item))
        else:
            Logger.instance().warning('Link not found %s %s' % (link_type, label))
            return ''

        a.set('text', a.text)
        return a
    def __val(self):
        """
        Validation pass during the train phase.

        Runs the segmentation net over the validation loader, accumulates the
        pixel loss and batch timings, saves a checkpoint, logs the results and
        restores train mode.
        """
        self.seg_net.eval()
        start_time = time.time()

        # torch.no_grad() replaces the removed Variable(..., volatile=True)
        # API. `async` became a reserved word in Python 3.7, so the original
        # .cuda(async=True) no longer even parses; the supported keyword is
        # `non_blocking`.
        with torch.no_grad():
            for j, data_tuple in enumerate(self.val_loader):
                # Move the batch to the GPU.
                inputs = data_tuple[0].cuda(non_blocking=True)
                targets = data_tuple[1].cuda(non_blocking=True)
                # Forward pass.
                outputs = self.seg_net(inputs)
                # Compute the loss of the val batch.
                loss_pixel = self.pixel_loss(outputs, targets)
                loss = loss_pixel

                # .item() replaces the removed 0-dim indexing loss.data[0].
                self.val_losses.update(loss.item(), inputs.size(0))

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

        self.module_utilizer.save_net(self.seg_net, self.iters)
        # Print the log info & reset the states.
        Log.info(
            'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
            'Loss {loss.avg:.8f}\n'.format(
            batch_time=self.batch_time, loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.seg_net.train()
Exemplo n.º 6
0
class Configuration:
    """Thin read-only wrapper around an ini file located under ../etc/."""

    def __init__(self, config_file = 'planner.ini'):
        self.logger = Logger().getLogger("configuration.Configuration")
        self._config_file = config_file
        # Resolve the ini file relative to this module, not the CWD.
        current_directory = os.path.dirname(os.path.abspath(__file__))
        self._config_file_path = os.path.join(current_directory, '../etc/' + config_file)
        self.logger.debug('Initialize Configuration with ' + self._config_file_path)

        self.config = ConfigParser.ConfigParser()
        self.config.read(self._config_file_path)

    def get(self, section, option):
        """Return the raw option value, or None when section/option is absent."""
        present = (self.config.has_section(section)
                   and self.config.has_option(section, option))
        return self.config.get(section, option) if present else None

    def getDict(self, section, option):
        '''dict example: {1:'aaa',2:'bbb'}'''
        # literal_eval safely parses the Python-literal dict syntax.
        value = self.get(section, option)
        return ast.literal_eval(value) if value is not None else None
Exemplo n.º 7
0
class UBSan:
    """Run a benchmark under clang's UndefinedBehaviorSanitizer and score results.

    Results are tallied per directory index i into self.output_dict as
    {"count", "TP", "FP"}; (i, j) pairs are also collected into tp_set/fp_set.
    """

    def get_name(self):
        return self.name


    def __init__(self, benchmark_path, log_file_path):
        self.pipeline = MakePipeline(benchmark_path)
        self.name = "UBSan"
        self.logger = Logger(log_file_path, self.name)
        self.output_dict = {}
        self.tp_set = set()
        self.fp_set = set()
        self.neg_count = 0
        os.chdir(os.path.expanduser(benchmark_path))

    def run(self):
        """Build with UBSan instrumentation, run the benchmark, return the tally."""
        self.pipeline.build_benchmark(CC="clang", CFLAGS="-g -fsanitize=undefined -fsanitize=integer", LD="clang")
        self.pipeline.run_bechmark(self, [], 2)
        return self.output_dict

    def get_output_dict(self):
        return self.output_dict

    def get_tp_set(self):
        # print(...) is valid in both Python 2 and 3; the original
        # `print len(...)` statement is a SyntaxError under Python 3.
        print(len(self.tp_set))
        return self.tp_set

    def get_fp_set(self):
        print(len(self.fp_set))
        return self.fp_set

    def analyze_output(self, exit_code, stdout, stderr, cur_dir, i, j):
        """Classify one run: TP/FP on a sanitizer report, NEG otherwise."""
        if len(stderr) > 0:
            print(stderr)
        if i not in self.output_dict:
            self.output_dict[i] = {"count": 0, "TP": 0, "FP": 0}
        self.output_dict[i]["count"] += 1
        if "runtime error" in (stdout + stderr).lower():
            # "w_Defects" directories contain intentional defects, so a
            # report there is a true positive.
            if "w_Defects" in cur_dir:
                self.output_dict[i]["TP"] += 1
                self.logger.log_output(stderr, i, cur_dir, j, "TP")
                self.tp_set.add((i, j))
            else:
                self.output_dict[i]["FP"] += 1
                self.logger.log_output(stderr, i, cur_dir, j, "FP")
                self.fp_set.add((i, j))
        else:
            self.logger.log_output(stdout, i, cur_dir, j, "NEG")
            self.neg_count += 1

    def analyze_timeout(self, cur_dir, i, j):
        """Count a timed-out run as a negative."""
        if i not in self.output_dict:
            self.output_dict[i] = {"count": 0, "TP": 0, "FP": 0}
        self.output_dict[i]["count"] += 1
        self.logger.log_output("", i, cur_dir, j, "NEG")
        self.neg_count += 1

    def cleanup(self):
        self.pipeline.clean_benchmark()
        self.logger.close_log()
    def get_vote_loss(self, key):
        """Return the vote-loss module for *key*; abort on an unknown key."""
        if key != 'vote_loss':
            Log.error('Vote loss: {} is not valid.'.format(key))
            exit(1)

        return VoteLoss()
 def seg_net(self):
     """Instantiate the segmentation model named in the 'network' config."""
     model_key = self.configer.get('network', 'model_name')
     if model_key != 'erf_net':
         Log.error('Model: {} not valid!'.format(model_key))
         exit(1)

     return ERFNet(self.configer.get('network', 'out_channels'))
Exemplo n.º 10
0
    def _run(self, timeout):
        """Run self.args.command in a background thread under a global time limit.

        The requested timeout is capped at max_wait_time and scaled by
        self.scale. If the global deadline (GlobalTimeout.time_left()) expires
        before the process finishes, the process is terminated then killed,
        and self.terminated / self.global_terminate are set accordingly.
        """
        timeout = min([max_wait_time, timeout]) * self.scale

        def target():
            # Runs in the worker thread: start the subprocess with redirected
            # stdio and block until it exits.
            Logger.instance().info('Running command with time limit {:1.2f} s: {} in {}'.format(timeout, self.args.command, self.args.cwd))
            self.process = Popen(self.args.command, stdout=self.out, stderr=self.err, stdin=self.inn, cwd=self.args.cwd)
            Logger.instance().info('started PID {}'.format(self.process.pid))
            self.process.wait()  # process itself is not limited but there is global limit
            Logger.instance().info('Command finished with %d' % self.process.returncode)

        thread = threading.Thread(target=target)
        thread.start()
        # Wait at most the globally remaining time, not the per-command timeout.
        thread.join(GlobalTimeout.time_left())

        if thread.is_alive():
            # The deadline passed while the process was still running.
            Logger.instance().info('Terminating process')
            self.terminated = True
            self.global_terminate = GlobalTimeout.time_left() < 0

            # terminate() then kill(): best-effort escalation; either call may
            # fail (e.g. process already gone), which is only reported.
            try:
                self.process.terminate()
            except Exception as e:
                print(e)

            try:
                self.process.kill()
            except Exception as e:
                print(e)
            # Reap the worker thread after the process is gone.
            thread.join()
Exemplo n.º 11
0
   def _print_merge_analysis(klass, source, target, building):
      """Log a per-floor analysis of merging *source* floors into *target*.

      Dispatches to FloorMergeAnalysis.analyse_<source>_to_<target> and logs,
      per floor, room totals plus the rooms *target* does not know about.
      """
      analysis = getattr(FloorMergeAnalysis, "analyse_" + source + "_to_" + target)
      merge_info = analysis(building)

      # Nothing to report unless both sides actually have floors.
      if not building.get_path(source + ".floors"):
         return
      if not building.get_path(target + ".floors"):
         return

      with Logger.info("{} -> {} Merge Analysis".format(source.upper(), target.upper())):
         for f_id, count, which in merge_info:
            total = count["total_rooms"]
            with_id = data_and_percent(count["identified_rooms"], total)
            without_id = data_and_percent(count["non_identified_rooms"], total)

            line  = "Floor {:5} | ".format(f_id)
            line += "Rooms: {:<4} | ".format(total)
            line += "With Id.: {:<12} | ".format(with_id)
            line += "No Id: {:<12}".format(without_id)
            with Logger.info(line):
               if count["non_identified_rooms"]:
                  Logger.warning(
                     source,
                     "knows about room(s)",
                     ", ".join(which["non_identified_rooms"]),
                     "but", target, "does not"
                     )
Exemplo n.º 12
0
    def __monkey_checker(self):
        """Watch the foreground activity during monkey testing.

        If the activity stays the same for a randomized "thinking" interval,
        send two Back key events to escape it; otherwise (re)start the
        interval for the newly seen activity.
        """
        __act = self.__android_helper.getCurAct()
        __app = self.__android_helper.getCurApp()
        Logger.d("App: %s, Act: %s " % (__act, __app))

        # First entry: record the current activity and pick a random max
        # thinking time for it.
        if self.__current_act == '':
            self.__current_act = __act
            self.__max_thinking_time = SETTING().get_max_thinging_time()
            self.__think_max_count = random.randint(1, self.__max_thinking_time)
            Logger.d("New Acticity, Max thing time = %s s" % self.__think_max_count)
        # Same activity as last check: once the counter reaches the limit,
        # send the Back key twice (keycode 4) and clear the recorded activity.
        elif self.__current_act == __act:
            if self.__think_count == self.__think_max_count:
                self.__android_helper.senKey(4)
                self.__android_helper.senKey(4)
                self.__current_act = ''
                self.__think_count = 0
                Logger.d("Seam Acticity Max count: Back.")
            else:
                self.__think_count += 1
                Logger.d("Seam Activity think count " + str(self.__think_count))
        # Activity changed: record it and reset the counter with a fresh
        # random limit.
        else:
            self.__current_act = __act
            self.__max_thinking_time = SETTING().get_max_thinging_time()
            self.__think_max_count = random.randint(1, self.__max_thinking_time)
            self.__think_count = 0
            Logger.d("Diff Activity think count empty, Reset Max thing time = %s s" % self.__think_max_count)
Exemplo n.º 13
0
class RRApp(object):
    """
    Abstract base class for all rr job renderer aplications.
    """

    def __init__(self):
        self.log = Logger(debug=True)

    def version(self):
        """Subclasses report the renderer version."""
        raise NotImplementedError()

    def open_scene(self):
        """Subclasses open the scene to render."""
        raise NotImplementedError()

    def set_env(self, name, value):
        """Set environment variable *name* to *value*; skip and log on None."""
        if value is None:
            self.log.info('Can not set environment "%s" to "%s"' % (name, value))
            return
        os.environ[name] = value
        self.log.info('Environmental variable "%s" set to "%s"' % (name, value))

    def start_kso_server(self):
        """
        This function perform Keep Scene Open RR functionality.
        Start TCP server and listen for commands from client.
        """
        KSO_HOST = "localhost"
        KSO_PORT = 7774
        server = rrKSOServer((KSO_HOST, KSO_PORT), rrKSOTCPHandler)
        server.handle_command()

    def start_render(self):
        """Subclasses kick off the actual render."""
        raise NotImplementedError()
Exemplo n.º 14
0
    def resolve_room_categories(klass, building, floor_dict=None):
        """
        Given a building, perform the mapping between it's dxf rooms and their
        relative categories.

        It does not save the building, which is responsibility of the caller.

        Arguments:
        - building: a Building object whose dxf rooms we want to process.
        - floor_dict: an (optional) dxf floor to limit the rooms to process. If
          None, all the current building floors will be used instead.

        Returns value: an integer representing the amount of rooms matched. The
        category name saved in place in each room dictionary, under the
        key "cat_name"
        """
        categorized_rooms = 0

        # Explicit conditional instead of the error-prone `a and b or c`
        # idiom; behavior is unchanged ([floor_dict] is always truthy here).
        if floor_dict:
            target_floors = [floor_dict]
        elif building.get("dxf") and building.get("dxf")["floors"]:
            target_floors = building.get("dxf")["floors"]
        else:
            target_floors = []

        cats = klass.get_room_categories_dict()
        for floor_dict in target_floors:
            categorized_rooms += klass._resolve_room_categories_for_floor(floor_dict, cats)

        if categorized_rooms:
            Logger.info(categorized_rooms, "rooms were categorized")

        return categorized_rooms
    def vis_peaks(self, heatmap, ori_img, name='default',
                  vis_dir=PEAK_DIR, scale_factor=1, img_size=(368, 368)):
        """Draw detected heatmap peaks onto ori_img and write one jpg per keypoint.

        Arguments:
        - heatmap: torch tensor (C,H,W-like; converted below) or HxWxC ndarray.
        - ori_img: torch tensor (denormalized below) or HxWx3 ndarray.
        - name: filename prefix for the written images.
        """
        vis_dir = os.path.join(self.configer.get('project_dir'), vis_dir)
        if not os.path.exists(vis_dir):
            Log.error('Dir:{} not exists!'.format(vis_dir))
            os.makedirs(vis_dir)

        if not isinstance(heatmap, np.ndarray):
            if len(heatmap.size()) != 3:
                Log.error('Heatmap size is not valid.')
                exit(1)

            heatmap = heatmap.data.squeeze().cpu().numpy().transpose(1, 2, 0)

        if not isinstance(ori_img, np.ndarray):
            ori_img = DeNormalize(mean=[128.0, 128.0, 128.0],std=[256.0, 256.0, 256.0])(ori_img)
            ori_img = ori_img.data.cpu().squeeze().numpy().transpose(1, 2, 0)

        for j in range(self.configer.get('num_keypoints')):
            # By this point `heatmap` is always an np.ndarray (either passed
            # in as one or converted above), so the original
            # `heatmap[:, :, j].data.cpu().numpy()` call could never work —
            # pass the 2-D slice directly.
            peaks = self.__get_peaks(heatmap[:, :, j])
            image_path = os.path.join(vis_dir, '{}_{}.jpg'.format(name, j))
            for peak in peaks:
                image = cv2.circle(ori_img, (peak[0], peak[1]),
                                   self.configer.get('vis', 'circle_radius'), (0,255,0), thickness=-1)
                image = self.scale_image(image, scale_factor, img_size)
                cv2.imwrite(image_path, image)
Exemplo n.º 16
0
 def __init__(self, quiet_start=0, quiet_end=0):
     """Record the quiet-hours window [quiet_start, quiet_end]; abort if inverted."""
     if quiet_end < quiet_start:
         Logger.log('Invalid Quiet Hours.', Color.RED)
         exit()
     self.is_quiet = False
     self.quiet_start = quiet_start
     self.quiet_stop = quiet_end
    def test(self):
        """Run pose inference on a single test image or every image in a directory.

        Exactly one of the 'test_img' / 'test_dir' config keys must be set;
        results go under val/results/pose/<dataset>/test.
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/pose', self.configer.get('dataset'), 'test')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')

        # Require exactly one input source.
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)
        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            filename = test_img.rstrip().split('/')[-1]
            self.__test_img(test_img, os.path.join(base_dir, filename))
            return

        for filename in self.__list_dir(test_dir):
            image_path = os.path.join(test_dir, filename)
            save_path = os.path.join(base_dir, filename)
            self.__test_img(image_path, save_path)
    def select_pose_model(self):
        """Instantiate the pose model selected by the 'method' config key.

        The 'phase' key chooses between the training and testing variant.
        """
        train_models = {
            'open_pose': OpenPose,
            'conv_pose_machine': ConvPoseMachine,
            'associative_embedding': AssociativeEmbedding,
            'fashion_ai': FashionAI,
        }
        test_models = {
            'open_pose': OpenPoseTest,
            'conv_pose_machine': ConvPoseMachineTest,
            'associative_embedding': AssociativeEmbeddingTest,
            'fashion_ai': FashionAITest,
        }

        key = self.configer.get('method')
        if key not in train_models:
            Log.error('Pose Model: {} is not valid.'.format(key))
            exit(1)

        table = train_models if self.configer.get('phase') == 'train' else test_models
        return table[key](self.configer)
    def select_det_model(self):
        """Instantiate the detection model selected by the 'method' config key."""
        key = self.configer.get('method')
        if key == 'pose_top_down':
            return ConvPoseMachine(self.configer)

        Log.error('Det Model: {} is not valid.'.format(key))
        # Abort like every sibling selector (select_pose_model, etc.);
        # previously this fell through and implicitly returned None,
        # deferring the failure to the caller.
        exit(1)
def information_exception(username):
    """Log an exception caught while handling the 'information' command.

    NOTE(review): the original text `'username:   '******'` is a scraper
    redaction artifact (a syntax error); the username concatenation is
    reconstructed here from the otherwise-unused parameter.
    """
    Logger.log(
        'information exception caught\n' +
        'username:   ' + username + '\n' +
        'stacktrace: ' + '\n' +
        traceback.format_exc() + '\n\n',

        Color.RED)
def subscriptions_exception(username):
    """Log an exception caught while handling the 'subscriptions' command.

    NOTE(review): the original text `'username:   '******'` is a scraper
    redaction artifact (a syntax error); the username concatenation is
    reconstructed here from the otherwise-unused parameter.
    """
    Logger.log(
        'subscriptions exception caught\n' +
        'username:   ' + username + '\n' +
        'stacktrace: ' + '\n' +
        traceback.format_exc() + '\n\n',

        Color.RED)
 def __test_img(self, image_path, save_path):
     """Dispatch single-image testing according to the configured dataset."""
     dataset = self.configer.get('dataset')
     if dataset == 'cityscape':
         self.__test_cityscape_img(image_path, save_path)
     elif dataset == 'laneline':
         self.__test_laneline_img(image_path, save_path)
     else:
         Log.error('Dataset: {} is not valid.'.format(dataset))
         exit(1)
def information(username):
    """Log a banner for an 'information' request by *username*.

    NOTE(review): the original text `'username: '******'` is a scraper
    redaction artifact (a syntax error); the username concatenation is
    reconstructed here from the otherwise-unused parameter.
    """
    Logger.log(
        '-------------------------------\n' +
        '         INFORMATION\n' +
        'username: ' + username + '\n' +
        '-------------------------------\n\n',

        Color.GREEN)
def unsubscribe_all_exception(username):
    """Log an exception caught while handling 'unsubscribe all'.

    NOTE(review): the original text `'username:   '******'` is a scraper
    redaction artifact (a syntax error); the username concatenation is
    reconstructed here from the otherwise-unused parameter.
    """
    Logger.log(
        'unsubscribe all exception caught\n' +
        'username:   ' + username + '\n' +
        'stacktrace: ' + '\n' +
        traceback.format_exc() + '\n\n',

        Color.RED)
    def get_relation_loss(self, key):
        """Return the relation-loss module for *key*; abort on an unknown key."""
        if key != 'embedding_loss':
            Log.error('Relation loss: {} is not valid.'.format(key))
            exit(1)

        return EmbeddingLoss(num_keypoints=self.configer.get('data', 'num_keypoints'),
                             l_vec=self.configer.get('capsule', 'l_vec'))
def subscriptions(username):
    """Log a banner for a 'subscriptions' request by *username*.

    NOTE(review): the original text `'username: '******'` is a scraper
    redaction artifact (a syntax error); the username concatenation is
    reconstructed here from the otherwise-unused parameter.
    """
    Logger.log(
        '-------------------------------\n' +
        '         SUBSCRIPTIONS\n' +
        'username: ' + username + '\n' +
        '-------------------------------\n\n',

        Color.GREEN)
def unsubscribe_all(username):
    """Log a banner for an 'unsubscribe all' request by *username*.

    NOTE(review): the original text `'username: '******'` is a scraper
    redaction artifact (a syntax error); the username concatenation is
    reconstructed here from the otherwise-unused parameter.
    """
    Logger.log(
        '-------------------------------\n' +
        '         UNSUBSCRIBE ALL\n' +
        'username: ' + username + '\n' +
        '-------------------------------\n\n',

        Color.RED)
 def handle_unsubscribe_from_num_message(database, message, payload):
     """Remove the author's subscription at index *payload*, reply, mark read."""
     Logger.log('Unsub from num')
     author = str(message.author)
     removed = database.remove_subscription_by_number(author, int(payload))
     subs = database.get_subscriptions_by_user(author)
     if removed:
         reply_text = inbox.compose_unsubscribe_from_num_message(author, removed, subs)
     else:
         reply_text = inbox.compose_unsubscribe_invalid_sub_message(author, subs)
     message.reply(reply_text)
     message.mark_as_read()
 def check_for_commands(self):
     """Poll reddit for control commands and apply pause/run/kill.

     Order matters: RUN overrides a simultaneous PAUSE, and KILL always
     terminates the process.
     """
     Logger.log('Checking for commands')
     commands = CommandHandler.get_commands(self.reddit)
     if CommandHandler.PAUSE in commands:
         self.run = False
     if CommandHandler.RUN in commands:
         self.run = True
     if CommandHandler.KILL in commands:
         exit()
Exemplo n.º 30
0
class Valgrind:
    """Run a benchmark under Valgrind and score results.

    Results are tallied per directory index i into self.output_dict as
    {"count", "TP", "FP"}; (i, j) pairs are also collected into tp_set/fp_set.
    """

    def get_name(self):
        return self.name


    def __init__(self, benchmark_path, log_file_path):
        self.pipeline = MakePipeline(benchmark_path)
        self.name = "Valgrind"
        self.logger = Logger(log_file_path, self.name)
        self.output_dict = {}
        self.tp_set = set()
        self.fp_set = set()
        self.neg_count = 0
        os.chdir(os.path.expanduser(benchmark_path))

    def run(self):
        """Build with gcc, run the benchmark under valgrind, return the tally."""
        self.pipeline.build_benchmark(CC="gcc", CFLAGS="", LD="gcc")
        # --error-exitcode=10 makes valgrind findings observable via exit code.
        self.pipeline.run_bechmark(self, ["valgrind", "--error-exitcode=10"], 6)
        return self.output_dict

    def get_output_dict(self):
        return self.output_dict

    def get_tp_set(self):
        # print(...) is valid in both Python 2 and 3; the original
        # `print len(...)` statement is a SyntaxError under Python 3.
        print(len(self.tp_set))
        return self.tp_set

    def get_fp_set(self):
        print(len(self.fp_set))
        return self.fp_set

    def analyze_output(self, exit_code, stdout, stderr, cur_dir, i, j):
        """Classify one run: TP/FP on a nonzero exit code, NEG otherwise."""
        print(stderr)
        if i not in self.output_dict:
            self.output_dict[i] = {"count": 0, "TP": 0, "FP": 0}
        self.output_dict[i]["count"] += 1
        if exit_code != 0:
            # "w_Defects" directories contain intentional defects, so a
            # report there is a true positive.
            if "w_Defects" in cur_dir:
                self.output_dict[i]["TP"] += 1
                self.logger.log_output(stderr, i, cur_dir, j, "TP")
                self.tp_set.add((i, j))
            else:
                self.output_dict[i]["FP"] += 1
                self.logger.log_output(stderr, i, cur_dir, j, "FP")
                self.fp_set.add((i, j))
        else:
            self.logger.log_output(stdout, i, cur_dir, j, "NEG")

    def analyze_timeout(self, cur_dir, i, j):
        """Count a timed-out run as a negative."""
        if i not in self.output_dict:
            self.output_dict[i] = {"count": 0, "TP": 0, "FP": 0}
        self.output_dict[i]["count"] += 1
        self.logger.log_output("", i, cur_dir, j, "NEG")
        self.neg_count += 1

    def cleanup(self):
        self.pipeline.clean_benchmark()
        self.logger.close_log()
Exemplo n.º 31
0
class Net(object):
    def __init__(self, class_num=CLASS_NUM, init_build=True):
        """Create the TF session and (optionally) the shared graph scaffolding.

        Arguments:
        - class_num: number of output classes (default CLASS_NUM).
        - init_build: when True, build the base placeholders, global step and
          validation-metric variables/summaries under scope 'net_base'.
        """
        # Single-threaded intra-op, soft placement, grow GPU memory on demand.
        net_config = tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=False,
            intra_op_parallelism_threads=1,
            gpu_options=tf.GPUOptions(allow_growth=True
                                      # force_gpu_compatible=True
                                      ))
        # net_config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1

        # Whether global variables have been initialized (see start()).
        self.__loaded = False
        self.run_time = RUN_TIME
        self.summary_path = os.path.join(ROOT_PATH, 'log',
                                         'summary_%s' % self.run_time)

        self.logger = Logger('net')
        self.class_num = class_num
        self.sess = tf.Session(config=net_config)
        self.graph = self.sess.graph
        # The following are populated later by subclass builders / training.
        self.img = None
        self.label = None
        self.train_step = None
        self.val_step = None
        self.val_data = None
        self.val_summary = None
        self.tower_loss = []
        self.tower_grads = []
        self.tower_preds = []
        self.tower_accuracy = []
        self.prediction = None
        self.accuracy = None
        self.loss = None
        self.trainer = None
        self.summary = None
        self.writer = None
        self.saver = None
        self.init_build = init_build

        if init_build:
            with self.graph.as_default(), tf.variable_scope('net_base'):
                # Input batch: NHWC float images and one-hot labels.
                self.img = tf.placeholder(tf.float32,
                                          [None, SIZE, SIZE, MOD_NUM],
                                          name='img')
                self.label = tf.placeholder(tf.float32, [None, CLASS_NUM],
                                            name='label')
                # self.inputs = tf.reshape(self.img, [-1, SIZE, SIZE, MOD_NUM])
                # self.inputs = tf.identity(self.img, 'inputs')

                self.train_step = tf.train.get_or_create_global_step(
                    self.graph)
                with tf.variable_scope('validation_net'):
                    # Separate step counter plus running validation metrics,
                    # each exported as a scalar in 'validation_summary'.
                    self.val_step = tf.Variable(0,
                                                dtype=tf.int32,
                                                name='v_step')
                    self.val_data = {
                        'avg_acc':
                        tf.Variable(0.0, dtype=tf.float32, name='avg_acc'),
                        'tp':
                        tf.Variable(0.0, dtype=tf.float32, name='tp'),
                        'tn':
                        tf.Variable(0.0, dtype=tf.float32, name='tn'),
                        'fp':
                        tf.Variable(0.0, dtype=tf.float32, name='fp'),
                        'fn':
                        tf.Variable(0.0, dtype=tf.float32, name='fn')
                    }

                    for name, var in self.val_data.items():
                        tf.summary.scalar(name, var, ['validation_summary'])
                    self.val_summary = tf.summary.merge(
                        self.graph.get_collection('validation_summary'),
                        name='val_summary')

    def start(self):
        """Initialize all TF global variables, exactly once per session."""
        if self.__loaded:
            return
        self.sess.run(tf.global_variables_initializer())
        self.__loaded = True

    def train(self,
              file_list,
              val_file_list,
              batch_size=BATCH_SIZE,
              val_batch_size=BATCH_SIZE,
              repeat_time=REPEAT_NUM,
              val_interval=VAL_INTERVAL,
              summary_interval=SUMMARY_INTERVAL):
        """Train until the input dataset is exhausted.

        Arguments:
        - file_list: training TFRecord files fed through generate_dataset.
        - val_file_list: files used by validate() every val_interval steps.
        - batch_size / val_batch_size: batch sizes for train / validation.
        - repeat_time: dataset repeat count (epochs).
        - val_interval / summary_interval: step periods for validation and
          for writing training summaries (validation takes precedence).
        """
        with self.graph.as_default():
            self.start()
            self.logger.info('Train start')
            _, next_batch = generate_dataset(file_list,
                                             batch_size,
                                             repeat_time=repeat_time)
            while True:
                try:
                    # Pull the next batch, then run one optimizer step while
                    # also fetching step/summary/metrics in the same call.
                    img, label = self.sess.run(next_batch)
                    _, train_step, summary, accuracy, loss = self.sess.run(
                        fetches=[
                            self.trainer, self.train_step, self.summary,
                            self.accuracy, self.loss
                        ],
                        feed_dict={
                            self.img: img,
                            self.label: label
                        })

                    if train_step % val_interval == 0:
                        self.validate(val_file_list, val_batch_size)
                    elif train_step % summary_interval == 0:
                        self.writer.add_summary(summary, train_step)
                        self.logger.info(
                            'Training summary %d, accuracy: %f, loss %f' %
                            (train_step, accuracy, loss))
                except tf.errors.OutOfRangeError:
                    # Dataset iterator exhausted: training is complete.
                    break
            self.logger.info('Train end')

    def predict(self, img):
        """Run the prediction op on a batch of images."""
        feed = {self.img: img}
        return self.sess.run(self.prediction, feed_dict=feed)

    def whole_predict(self,
                      tfr_name,
                      batch_size=BATCH_SIZE,
                      with_label=True,
                      final_result=False,
                      min_connect_tumor_num=MIN_CONNECT_TUMOR_NUM,
                      min_tumor_num=MIN_TUMOR_NUM,
                      random=True):
        """Predict every slice of one TFRecord scan, with optional averaging.

        When random is True, the slices are predicted WHOLE_REPEAT_NUM times
        in shuffled order and the softmax outputs averaged before argmax;
        otherwise a single in-order pass is used. Isolated single-slice
        flips (0 between two 1s, or 1 between two 0s) are smoothed away.
        When final_result is True the per-slice labels are collapsed into a
        single scan-level label via validate_slice_to_whole.

        Returns tfr_predict (per-slice or scan-level), plus the argmax labels
        when with_label is True.
        """
        with self.graph.as_default():
            # Load the whole record unbatched and in order.
            _, next_batch = generate_dataset([tfr_name],
                                             None,
                                             train=False,
                                             shuffle=False,
                                             batch=False)
            tfr_predict = []
            tfr_imgs, tfr_labels = self.sess.run(next_batch)
            tfr_size = len(tfr_labels)
            if random:
                rank = np.arange(tfr_size)
                for _ in range(WHOLE_REPEAT_NUM):
                    # Predict in a fresh shuffled order each repetition.
                    rank_copy = rank.copy()
                    np.random.shuffle(rank_copy)
                    predict = []
                    for i in range(0, tfr_size, batch_size):
                        imgs = tfr_imgs[rank_copy][i:i + batch_size]
                        # Pad the last partial batch by tiling it.
                        if len(imgs) < batch_size:
                            imgs = np.concatenate(
                                [imgs] * batch_size).astype(int)[:batch_size]
                        predict.append(self.predict(imgs))
                    predict = np.concatenate(predict)[:tfr_size]
                    # Restore original slice order before accumulating.
                    pair = list(zip(predict, rank_copy))
                    pair.sort(key=lambda x: x[1])
                    tfr_predict.append(np.stack(np.array(pair).T[0]))
                # Average the repeated runs, then pick the argmax class.
                tfr_predict = np.array(tfr_predict).mean(axis=0).argmax(axis=1)
            else:
                for i in range(0, tfr_size, batch_size):
                    imgs = tfr_imgs[i:i + batch_size]
                    if len(imgs) < batch_size:
                        imgs = np.concatenate(
                            [imgs] * batch_size).astype(int)[:batch_size]
                    tfr_predict.append(self.predict(imgs))
                tfr_predict = np.concatenate(tfr_predict).argmax(
                    axis=1)[:tfr_size]

            # Fill isolated 0s between two 1s...
            tfr_zero_predict_pos = np.where(tfr_predict == 0)[0]
            for pos in tfr_zero_predict_pos:
                if 0 < pos < tfr_predict.size - 1:
                    if tfr_predict[pos - 1] == tfr_predict[pos + 1] == 1:
                        tfr_predict[pos] = 1

            # ...and clear isolated 1s between two 0s.
            tfr_one_predict_pos = np.where(tfr_predict == 1)[0]
            for pos in tfr_one_predict_pos:
                if 0 < pos < tfr_predict.size - 1:
                    if tfr_predict[pos - 1] == tfr_predict[pos + 1] == 0:
                        tfr_predict[pos] = 0

            if final_result:
                tfr_predict = self.validate_slice_to_whole(
                    tfr_predict, min_tumor_num, min_connect_tumor_num)

            if with_label:
                return tfr_predict, tfr_labels.argmax(axis=1)
            return tfr_predict

    def validate_slice_to_whole(self,
                                predict,
                                min_tumor_num=MIN_TUMOR_NUM,
                                min_connect_tumor_num=MIN_CONNECT_TUMOR_NUM):
        """Collapse per-slice 0/1 predictions into one whole-scan label.

        Returns 1 (tumor) only when the scan has at least `min_tumor_num`
        tumor slices in total AND at least `min_connect_tumor_num` of them
        occur consecutively; otherwise returns 0.
        """
        total_tumor_slices = int((predict == 1).sum())
        if total_tumor_slices < min_tumor_num:
            return 0
        # Sliding-window sum via convolution with a ones kernel: the sum
        # reaches `min_connect_tumor_num` exactly when a run of that many
        # consecutive tumor slices exists.
        window_sums = np.convolve(predict,
                                  np.ones(min_connect_tumor_num),
                                  mode='same')
        return 1 if min_connect_tumor_num in window_sums else 0

    def validate_report(self,
                        predict_list,
                        label_list,
                        test,
                        tumor_as_1=True,
                        quiet=False):
        """Compute and report confusion-matrix rates for a validation run.

        Arguments:
        - predict_list: 1-D int array of predicted class ids (0/1).
        - label_list: 1-D int array of ground-truth class ids (0/1).
        - test: when False (and not quiet), rates are also pushed into the
          TF validation summary variables for TensorBoard.
        - tumor_as_1: which class id denotes "tumor" (True -> 1).
        - quiet: suppress logging / result-file / summary output and only
          return the computed rates.

        Returns a dict with 'avg_accuracy' and 'tp/tn/fp/fn_rate'.

        Fix: the original if/elif chain raised ZeroDivisionError when both
        class subsets were empty (the `not tumor_list.size` branch still
        divided by len(normal_list) == 0). Rates for an empty subset now
        default to 0.0, matching the behavior of the other branches.
        """
        tumor_num, normal_num = int(tumor_as_1), int(not tumor_as_1)
        tumor_list = predict_list[label_list == tumor_num]  # tumor result
        normal_list = predict_list[label_list == normal_num]  # normal result

        # Sensitivity / miss rate over tumor samples (0 when none present).
        if tumor_list.size:
            tp_rate = (tumor_list == tumor_num).sum() / \
                len(tumor_list)    # t -> t
            fn_rate = 1 - tp_rate  # t -> f
        else:
            tp_rate = 0.0
            fn_rate = 0.0

        # Specificity / false-alarm rate over normal samples (0 when none).
        if normal_list.size:
            tn_rate = (normal_list == normal_num).sum() / \
                len(normal_list)   # f -> f
            fp_rate = 1 - tn_rate  # f -> t
        else:
            tn_rate = 0.0
            fp_rate = 0.0

        avg_accuracy = np.equal(predict_list, label_list).mean()
        train_step = self.sess.run(self.train_step)

        if not quiet:
            # NOTE: 'accuary' typo kept byte-for-byte — this string is
            # appended to the result file and may be parsed downstream.
            report_result = '(train step %d) Average accuary %f, TP %f, TN %f, FP %f, FN %f' % (
                train_step, avg_accuracy, tp_rate, tn_rate, fp_rate, fn_rate)
            self.logger.info('Validation end')
            self.logger.info(report_result)
            with open(os.path.join(self.summary_path, 'result'), 'a+') as file:
                file.write(report_result + '\n')

            if not test:
                # Mirror the rates into TF variables so the merged validation
                # summary picks them up for TensorBoard.
                self.sess.run([
                    tf.assign(self.val_data['avg_acc'], avg_accuracy),
                    tf.assign(self.val_data['tp'], tp_rate),
                    tf.assign(self.val_data['tn'], tn_rate),
                    tf.assign(self.val_data['fp'], fp_rate),
                    tf.assign(self.val_data['fn'], fn_rate)
                ])
                self.writer.add_summary(self.sess.run(self.val_summary),
                                        train_step)

        return {
            'avg_accuracy': avg_accuracy,
            'tp_rate': tp_rate,
            'tn_rate': tn_rate,
            'fp_rate': fp_rate,
            'fn_rate': fn_rate
        }

    def validate(self, file_list, batch_size=BATCH_SIZE, test=False):
        """Run per-slice validation over the TFRecord files in `file_list`.

        Iterates the (non-training) dataset until exhaustion, collecting
        argmax predictions and labels, then delegates metric computation and
        reporting to `validate_report`.

        Arguments:
        - file_list: TFRecord paths handed to `generate_dataset`.
        - batch_size: inference mini-batch size.
        - test: forwarded to `validate_report` (True skips summary writing).

        Returns (predict_list, label_list) as 1-D arrays of class ids.
        """
        with self.graph.as_default():
            _, next_batch = generate_dataset(file_list,
                                             batch_size,
                                             train=False,
                                             shuffle=True,
                                             batch=True)
            # Reset the validation step counter for this run.
            val_step_initer = tf.variables_initializer([self.val_step])
            self.sess.run(val_step_initer)
            label_list = []
            predict_list = []
            while True:
                try:
                    img, label = self.sess.run(next_batch)
                    accuracy, loss, predict, val_step = self.sess.run(
                        fetches=[
                            self.accuracy, self.loss, self.prediction,
                            self.val_step.assign_add(1)
                        ],
                        feed_dict={
                            self.img: img,
                            self.label: label
                        })
                    if val_step % VAL_SUMMARY_INTERVAL == 0:
                        self.logger.info(
                            'Validation summary %d, accuracy: %f, loss: %f' %
                            (val_step, accuracy, loss))
                    predict_list.append(predict.argmax(axis=1))
                    label_list.append(label.argmax(axis=1))

                except tf.errors.OutOfRangeError:
                    # Dataset iterator exhausted: the validation pass is done.
                    break

            predict_list = np.concatenate(predict_list)
            label_list = np.concatenate(label_list)
            self.validate_report(predict_list,
                                 label_list,
                                 test,
                                 tumor_as_1=True)
        return predict_list, label_list

    def whole_validate(self,
                       file_list,
                       batch_size=BATCH_SIZE,
                       test=False,
                       condition=None,
                       min_connect_tumor_num=MIN_CONNECT_TUMOR_NUM,
                       min_tumor_num=MIN_TUMOR_NUM):
        """Whole-scan validation: one 0/1 prediction per TFRecord file.

        `condition(tfr_name, tfr_labels)` derives the ground-truth whole-scan
        label from the per-slice labels; by default a scan counts as positive
        when any slice label is 1. Metrics are reported via `validate_report`.

        Returns (predict_list, label_list) as 1-D int64 arrays.
        """
        if condition is None:
            # Default ground truth: the scan is positive when any slice is.
            def condition(tfr_name, tfr_labels):
                return 1 in tfr_labels
        with self.graph.as_default():
            total_files = len(file_list)
            label_list = np.zeros((total_files), dtype=np.int64)
            predict_list = np.zeros((total_files), dtype=np.int64)
            for idx, tfr_name in enumerate(file_list):
                predict_list[idx], tfr_labels = self.whole_predict(
                    tfr_name,
                    batch_size,
                    with_label=True,
                    final_result=True,
                    min_connect_tumor_num=min_connect_tumor_num,
                    min_tumor_num=min_tumor_num)
                if condition(tfr_name, tfr_labels):
                    label_list[idx] = 1

            self.validate_report(predict_list,
                                 label_list,
                                 test,
                                 tumor_as_1=True)
        return predict_list, label_list

    def whole_validate_best(self,
                            file_list,
                            batch_size=BATCH_SIZE,
                            test=False,
                            condition=None):
        """Grid-search the whole-scan thresholds for best average accuracy.

        Runs slice-level prediction once per file, then sweeps
        (min_connect_tumor_num, min_tumor_num) pairs through
        `validate_slice_to_whole`, scoring each combination quietly with
        `validate_report`. Logs and appends the best pair(s) to the result
        file.
        """
        if condition is None:
            # Default ground truth: the scan is positive when any slice is.
            def condition(tfr_name, tfr_labels):
                return 1 in tfr_labels
        with self.graph.as_default():
            total_files = len(file_list)
            label_list = np.zeros((total_files), dtype=np.int64)
            slice_predictions = list()
            for idx, tfr_name in enumerate(file_list):
                slice_pred, tfr_labels = self.whole_predict(
                    tfr_name, batch_size, with_label=True, final_result=False)
                slice_predictions.append(slice_pred)
                if condition(tfr_name, tfr_labels):
                    label_list[idx] = 1

        # Sweep every threshold combination, collecting its accuracy.
        accuracies = dict()
        for connect_num in range(1, 10):
            for tumor_num in range(1, 25):
                to_whole = partial(self.validate_slice_to_whole,
                                   min_connect_tumor_num=connect_num,
                                   min_tumor_num=tumor_num)
                whole_predictions = np.array(
                    [to_whole(p) for p in slice_predictions])
                report = self.validate_report(whole_predictions,
                                              label_list,
                                              test,
                                              tumor_as_1=True,
                                              quiet=True)
                accuracies[(connect_num, tumor_num)] = report['avg_accuracy']

        # All pairs tied at the best accuracy, in sweep order (identical to
        # the stable descending sort the original used).
        best_result = max(accuracies.values())
        best_pairs = [
            '(min_connect_tumor_num=%d, min_tumor_num=%d)' %
            (connect_num, tumor_num)
            for (connect_num, tumor_num), acc in accuracies.items()
            if acc == best_result
        ]

        report_result = 'Best params is %s with accuary %f' % (
            ', '.join(best_pairs), best_result)
        self.logger.info(report_result)
        with open(os.path.join(self.summary_path, 'result'), 'a+') as file:
            file.write(report_result + '\n')

    def save(self, model_name):
        """Persist the session to <summary_path>/model/<model_name>.

        Does nothing unless a model has been loaded/initialised first.
        """
        if not self.__loaded:
            return
        target_path = os.path.join(self.summary_path, 'model', model_name)
        self.saver.save(self.sess, target_path)
        self.logger.info('Save model %s' % target_path)

    def load(self, model_path):
        """Restore a saved checkpoint into this session.

        Re-derives `run_time` / `summary_path` from the checkpoint location so
        subsequent logs and summaries land next to the loaded model; when the
        graph was not built in-process (`init_build` False) it re-imports the
        meta graph and re-binds every tensor handle this class needs. No-op if
        a model is already loaded.
        """
        if not self.__loaded:
            # Assumes model_path looks like .../summary_<run_time>/model/<name>;
            # [-3][8:] strips the 'summary_' prefix — TODO confirm layout.
            self.run_time = model_path.strip('/').split('/')[-3][8:]
            self.summary_path = os.path.join(ROOT_PATH, 'log',
                                             'summary_%s' % self.run_time)
            self.logger.reset_log_path(self.summary_path)
            self.logger.info('Change run time back to %s' % self.run_time)
            if not self.init_build:
                # Graph not built here: import it from the .meta file and look
                # up the tensors by the names they were saved under.
                self.saver = tf.train.import_meta_graph(os.path.join(
                    self.summary_path, 'model', 'model.meta'),
                                                        clear_devices=True)
                self.img = self.graph.get_tensor_by_name('net_base/img:0')
                self.label = self.graph.get_tensor_by_name('net_base/label:0')
                self.train_step = self.graph.get_tensor_by_name(
                    'net_base/global_step:0')
                self.val_step = self.graph.get_tensor_by_name(
                    'net_base/validation_net/v_step:0')
                self.val_data = {
                    'avg_acc':
                    self.graph.get_tensor_by_name(
                        'net_base/validation_net/avg_acc:0'),
                    'tp':
                    self.graph.get_tensor_by_name(
                        'net_base/validation_net/tp:0'),
                    'tn':
                    self.graph.get_tensor_by_name(
                        'net_base/validation_net/tn:0'),
                    'fp':
                    self.graph.get_tensor_by_name(
                        'net_base/validation_net/fp:0'),
                    'fn':
                    self.graph.get_tensor_by_name(
                        'net_base/validation_net/fn:0')
                }
                self.val_summary = tf.summary.merge(
                    self.graph.get_collection('validation_summary'),
                    name='val_summary')
            self.saver.restore(self.sess, model_path)
            self.__loaded = True
            self.logger.info('Load model %s' % model_path)
Exemplo n.º 32
0
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait  # explicit wait timeout
from utils import assertion
from tools.common.BrowserDriver import BrowserDriver
from HTMLTestRunner_cn import HTMLTestRunner
from utils.file_read import YamlRead
from utils.file_write import YamlWrite

import unittest
import time
import os
import sys
import random

logger = Logger('logger').getlog()


class Test_NamePlateScreen(unittest.TestCase):
    """Selenium test case; setUp opens a browser and logs into the site."""

    def setUp(self):
        """Start a fresh browser session and fill in the login credentials."""
        self.sc = Config()
        self.b = BrowserDriver()
        self.driver = self.b.OpenBrowser()
        self.driver.implicitly_wait(5)  # global implicit wait — NOTE: original comment said 30s but the value is 5s; confirm intent
        self.imgs = []
        self.ym = YamlWrite()
        # Log in to the site before every test case runs
        self.b.by_find_element('name', 'account').send_keys(
            self.sc.getConfig('User').get('username'))
        self.b.by_find_element('name', 'password').send_keys(
            self.sc.getConfig('User').get('password'))
Exemplo n.º 33
0
from utils.logger import Logger
from utils.sampler import SamplerConfig
from utils.loss import LossConfig
from utils.optim import OptimConfig
from utils.dataloader import LoaderConfig
from model.gan import GanConfig, GanSchema, BaseGan
from model.zoo import ModelConfig
from dataset.config import DatasetConfig
from matplotlib import pyplot as plt
import os


logger = Logger(cat='test')


@BaseGan.register('Vanilla')
class VanillaGan(BaseGan):
    @BaseGan.add_post_iterate_hook
    def vis_gen(self, iter_num):
        if (iter_num + 1) % 1000 != 0:
            return
        self.eval_mode()
        x_generated = self.sampling(100, 100)
        xrange = ((self.dataset.data[:, 0].min() - 1, self.dataset.data[:, 0].max() + 1),
                  (self.dataset.data[:, 1].min() - 1, self.dataset.data[:, 1].max() + 1))
        fig = plt.figure()
        plt.hist2d(x_generated[:, 0], x_generated[:, 1], bins=100, range=xrange)
        plt.savefig(os.path.join(logger.img_dir, 'hmap_gen_{}.jpg'.format(iter_num)))
        plt.close()

        fig = plt.figure()
Exemplo n.º 34
0
from selenium import webdriver
from utils.logger import Logger

logger = Logger(logger="Browser").logger


# Browser driver configuration
class Browser(object):
    """Thin wrapper that starts a Chrome WebDriver and logs the instance."""

    def __init__(self):
        self.driver = webdriver.Chrome()
        logger.info("Starting browser:%s" % self.driver)


if __name__ == '__main__':
    # Smoke test: open the Baidu homepage and quit the browser.
    dr1 = Browser()
    dr = dr1.driver
    dr.get('http://www.baidu.com')
    dr.quit()
Exemplo n.º 35
0
    print(model, '\n')

    # TODO: Remove and handle with checkpoints
    if not args.train:
        print("Loading Model Weights ...")
        evaluation_state_dict = torch.load(args.eval_ckpt)
        model_dict = model.state_dict(full_dict=True)
        model_dict.update(evaluation_state_dict)
        model.load_state_dict(model_dict)
        model.eval()

    if args.train:
        val_dataset.set_label_usage(dataset.return_labels)

    # Create logger
    logger = Logger(os.path.join(job_path, 'logs'))

    # Get trainer
    trainer_creator = getattr(TrainerLoader, args.model)
    trainer = trainer_creator(args, model, dataset, data_loader, logger,
                              device)
    if args.train:
        evaluator = trainer_creator(args, model, val_dataset, val_data_loader,
                                    logger, device)
        evaluator.train = False

    if args.train:
        print("Training ...")
    else:
        print("Evaluating ...")
        vars(args)['num_epochs'] = 1
Exemplo n.º 36
0
def main():
    """
    Run main program: convert an IST JSON description to an output format.

    Parses command-line options, loads the input JSON, instantiates the
    registered node types, wires cross-references between parsed items, and
    emits the requested format (tex/latex, html, or a markdown demo).
    Exits the process with 0 on success, 1 on failure/unsupported format.
    """
    parser = create_parser()
    options, args = parse_args(parser)

    # create instance of formatter
    from ist.ist_formatter_module import ISTFormatter
    formatter = ISTFormatter()

    # read input json file
    with open(options.input, 'r') as fp:
        json_data = json.load(fp)
        ist_info = {
            'version':
            json_data['version']['flow123d_version']
            if 'version' in json_data else 'Input reference'
        }
        json_data = json_data[
            'ist_nodes'] if 'ist_nodes' in json_data else json_data

        # filter out unsupported types, they won't be formatted
        items = list()
        for json_item in json_data:
            input_type = json_item[
                'input_type'] if 'input_type' in json_item else None

            if input_type in registered_nodes:
                item = registered_nodes[input_type]()
                item.parse(json_item)
                items.append(item)
            else:
                Logger.instance().info(' - item type not supported: %s' %
                                       str(json_item))

    # if we have all items parsed we create references
    for item in items:
        if getattr(item, 'input_type',
                   InputType.UNKNOWN) == InputType.MAIN_TYPE:
            if item.input_type == InputType.RECORD:
                # link each key's referenced type back to this record
                for key in getattr(item, 'keys', []):
                    if key.type.get_reference().input_type == InputType.ARRAY:
                        key.type.get_reference().subtype.get_reference(
                        ).add_link(item)
                    else:
                        key.type.get_reference().get_generic_root().add_link(
                            item)

            if item.input_type == InputType.ABSTRACT_RECORD:
                # abstract records link back from every implementation
                for imp in getattr(item, 'implementations', []):
                    imp.get_reference().add_link(item)

    # disable sort for now (type and name) keep items order unchanged
    # items = sorted(items, key=lambda x: '{}{}'.format(x.input_type.value, x.name))

    # convert to tex format
    if options.format.lower() in ('tex', 'latex'):
        Logger.instance().info('-' * 80)
        Logger.instance().info('Formatting ist to tex format')
        from ist.utils.texlist2 import TexList
        TexList.PRETTY_FORMAT = options.debug
        formatter.json2latex(items, options.output, info=ist_info)
        if os.path.isfile(options.output):
            print('Ok: File "{:s}" created'.format(options.output))
            sys.exit(0)
        else:
            print('Error: File "{:s}" does not exists'.format(options.output))
            sys.exit(1)

    # convert to HTML format
    if options.format.lower() in ('html', 'html5', 'www', 'htm'):
        Logger.instance().info('-' * 80)
        Logger.instance().info('Formatting ist to html format')
        formatter.json2html(items, options.output, info=ist_info)
        if os.path.isfile(options.output):
            print('Ok: File "{:s}" created'.format(options.output))
            sys.exit(0)
        else:
            print('Error: File "{:s}" does not exists'.format(options.output))
            sys.exit(1)

    # markdown mode is a self-test: render a sample document and exit
    if options.format.lower() in ('markdown', 'md'):
        Logger.instance().info('Testing markdown')
        text = '''
# Using markdown in description

**Description field** supports markdown syntax (support is partial and some techniques may not work in Python markdown implementation).

## Links

Link to record [[root]] selection [[DG_output_fields]] or abstract [[Transport]]. All links are in the same format.
If `link_name` is specified in `attributes` (let say DG_output_fields has link_name of DG), we can use that [[DG]]
Record and Selection types offer links to their keys/values, so you can write [[DG#porosity]], to specify link text use following syntax [[DG#porosity:poro]]

or link to key in Root item [[root#flow123d_version]]




Every name should be unique, if `link_name` is duplicate first occurrence will be used!
To avoid conflict with `link_name` or `name` we can use type specification like this [[record#root]]. 3 types are registered:

 1. type RECORD supporting prefixes:

   - r
   - record

 2. type SELECTION supporting prefixes:

   - s
   - selection

 3. type ABSTRACT supporting prefixes:

   - a
   - ar
   - abstract


## Basics
We can write **bold** statements (or *italic* if needed). We can also ~~strkkethrg~~ strikethrough some text to express some change.

Another usage can be in lists, we can write both unordered and ordered list. Important is to place one empty line before list starts.
Unordered list:

 - important item
 - another important item

Ordered list have same rules:

 1. item number 1
 2. and item number 2

To write code section with monospaced font, such as variable use `this` syntax.

**Note** Use line breaks \\n chars sparely, and only break text-flow if necessarily.

Full markdown specification can be found [here](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet) but since Python is used to parse markdown, there may be slight differences.
(($q_w$))
        '''
        o = htmltree()
        o.description(text)
        print(o.dump())
        sys.exit(0)

    Logger.instance().error("Error: Unsupported format '{:s}'".format(
        options.format))
    sys.exit(1)
Exemplo n.º 37
0
        def log(*args, **kwargs):
            """Log the incoming request body and URL, then call the wrapped view."""
            # Deep-copy the parsed body so later mutation by the view cannot
            # affect the logged record.
            msg = copy.deepcopy(request.json)
            Logger().info({"body": msg, "url": request.url})

            return f(*args, **kwargs)
Exemplo n.º 38
0
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            num_iters += 1
    return correct / total, running_loss / num_iters


if __name__ == "__main__":
    # Entry point: usage is `python train.py <config.yaml>`.
    if len(sys.argv) < 2:
        raise ValueError('Please enter config file path.')

    # Read configs
    configFile = sys.argv[1]
    with open(configFile, 'r') as f:
        config = yaml.safe_load(f)

    # Create a logger and snapshot the code/config used for this run
    logger = Logger(config['logdir'])
    logger.log_string(datetime.datetime.now())
    logger.log_string(config)
    logger.backup_files(['train.py', 'qpu_ops.py', 'qpu_layers.py'])

    # Also back up the dataset-specific feeder actually in use
    if config['dataset'] == 'ntu':
        logger.backup_files(['data/ntu/feeder.py'])
    if config['dataset'] == 'fpha':
        logger.backup_files(['data/fpha/feeder.py'])

    # Train
    train(config, logger)
Exemplo n.º 39
0
    message = 'KFold training and evaluation has been done.\n'
    message += f'F1 majority voting - Avg: {f1_majority_mean}, Std: {f1_majority_std}\n'
    message += f'F1 optimized - Avg: {f1_optimized_mean}, Std: {f1_optimized_std}\n'
    message += f'Threshold - Avg: {threshold_mean}'
    logger.post(message)


if __name__ == '__main__':
    # Command-line interface for the KFold training/evaluation script.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch-size', default=512, type=int)
    parser.add_argument('--device-ids',
                        metavar='N',
                        type=int,
                        nargs='+',
                        default=[0])
    parser.add_argument('--max-workers', default=2, type=int)
    parser.add_argument('--debug', action='store_true')
    args = parser.parse_args().__dict__

    # Logger posts progress to Slack via webhook as well as to log files.
    log_dir = LOG_DIR.joinpath(f'{SCRIPT_NAME}')
    logger = Logger(SCRIPT_NAME,
                    log_dir=log_dir,
                    webhook_url=SLACK_URL,
                    overwrite=True)
    logger.info(f'Script starts with command line arguments: {args}')
    try:
        main(logger, args)
        logger.post('===== Script completed successfully! =====')
    except Exception as e:
        # Broad catch is deliberate: report any crash before the process dies.
        logger.exception(e)
Exemplo n.º 40
0
 def _init_logger(self):
     """Build and return a Logger rooted at <model_dir>/logs."""
     log_dir = os.path.join(self.model_dir, 'logs')
     return Logger(log_dir)
Exemplo n.º 41
0
 def __init__(self):
     """Create the module logger and the DB-backed historical data provider."""
     self.logger = Logger(__name__, PathMgr.get_log_path())
     self.historical_data_provider = DBProvider()
Exemplo n.º 42
0
 def __init__(self, logger=None):
     """Store the logger to use, creating a default one when none is given.

     Fix: the previous default `logger=Logger(__name__, None)` was evaluated
     once at class-definition time, so every instance constructed without an
     explicit logger shared the same Logger object (and it was built at
     import even when never used). A None sentinel defers construction to
     call time; passing a logger explicitly behaves exactly as before.
     """
     self.logger = logger if logger is not None else Logger(__name__, None)
Exemplo n.º 43
0
def main():
    """Interactive training entry point.

    Asks whether this is a new experiment to choose the config file, builds
    the TF session, model, train/validate data generators, TensorBoard logger
    and trainer, restores pretrained MobileNet weights (and, for resumed
    experiments, the saved LSTM), then starts training.
    """

    new_exp = input("Is this a new experiment? [Y/N]")
    if new_exp == 'Y':
        # capture the config path from the run arguments
        # then process the json configuration file
        config_filename = '/home/ADAMGE/action_recognition/action_recognition_v1/configs/params.json'

    elif new_exp == 'N':
        config_filename = input(
            "Enter the full path of the config file in the old experiment folder"
        )
    else:
        print("Wrong input")
        exit()

    paths_filename = '/home/ADAMGE/action_recognition/action_recognition_v1/configs/paths.json'
    config = process_config(config_filename, paths_filename, new_exp)

    # create the experiments dirs and write the JSON file to the dir
    create_dirs([config.summary_dir, config.checkpoint_dir], config_filename)

    # cap GPU memory usage so other jobs can share the device
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
    sess_config = tf.ConfigProto(gpu_options=gpu_options)

    # create tensorflow session
    sess = tf.Session(config=sess_config)

    # create an instance of the model you want
    model = ExampleModel(config)

    # create your data generator
    data_train = DataGenerator(model,
                               config,
                               sess,
                               'train',
                               shuffle=True,
                               augment=False)
    data_validate = DataGenerator(model,
                                  config,
                                  sess,
                                  'validate',
                                  shuffle=False,
                                  augment=False)

    # create tensorboard logger
    logger = Logger(sess, config)

    # create trainer and pass all the previous components to it
    trainer = ExampleTrainer(sess, model, data_train, data_validate, config,
                             logger)

    # restore mobile net
    model.restore_mobile_net(sess)

    # load model if exists - only the lstm
    if new_exp == 'N':
        model.load(sess)

    # training
    trainer.train()
Exemplo n.º 44
0
class Agent(object):
    """Distributed RL actor: runs episodes and feeds a shared replay queue.

    Each agent owns its own environment instance and OU noise process. During
    `run` it pushes N-step transitions into `replay_queue`, logs episode
    metrics, periodically saves checkpoints, and (for exploration/supervisor
    agents) refreshes its actor weights from the learner.
    """

    def __init__(self,
                 config,
                 policy,
                 global_episode,
                 n_agent=0,
                 agent_type='exploration',
                 log_dir=''):
        """Set up environment, exploration noise, actor handle and logger.

        Arguments:
        - config: hyper-parameter dict (max_ep_length, action_dim, ...).
        - policy: shared actor network used to select actions.
        - global_episode: shared counter of episodes across all agents.
        - n_agent: index of this agent; also registered with the env.
        - agent_type: 'exploration', 'supervisor', or anything else for
          pure exploitation (raw policy output).
        - log_dir: root directory for this agent's logs and checkpoints.
        """
        print(f"Initializing agent {n_agent}...")
        self.config = config
        self.n_agent = n_agent
        self.agent_type = agent_type
        self.max_steps = config['max_ep_length']
        self.num_episode_save = config['num_episode_save']
        self.global_episode = global_episode
        self.local_episode = 0
        self.log_dir = log_dir

        # Create environment
        self.env_wrapper = create_env_wrapper(config)
        self.env_wrapper.env.set_agent(self.n_agent)
        self.ou_noise = OUNoise(dim=config["action_dim"],
                                low=config["action_low"],
                                high=config["action_high"])
        self.ou_noise.reset()

        self.actor = policy

        # Logger
        log_path = f"{log_dir}/agent-{n_agent}"
        self.logger = Logger(log_path)

    def update_actor_learner(self, learner_w_queue):
        """Update local actor to the actor from learner. """
        # Best-effort: skip silently when no fresh weights are available yet.
        if learner_w_queue.empty():
            return
        source = learner_w_queue.get()
        target = self.actor
        for target_param, source_param in zip(target.parameters(), source):
            w = torch.tensor(source_param).float()
            target_param.data.copy_(w)

    def run(self, training_on, replay_queue, learner_w_queue, update_step):
        """Main episode loop; runs until `training_on.value` becomes falsy.

        Pushes N-step (state, action, discounted_reward, next_state, done,
        gamma) tuples into `replay_queue` and logs per-episode metrics at the
        shared `update_step` counter value.
        """
        # Initialise deque buffer to store experiences for N-step returns
        self.exp_buffer = deque()

        best_reward = -float("inf")
        rewards = []
        while training_on.value:
            episode_reward = 0
            num_steps = 0
            self.local_episode += 1
            # NOTE(review): += on a shared counter is not atomic across
            # processes — confirm callers guard it or tolerate drift.
            self.global_episode.value += 1
            self.exp_buffer.clear()
            if self.local_episode % 100 == 0:
                print(f"Agent: {self.n_agent}  episode {self.local_episode}")

            ep_start_time = time.time()
            print("call reset on agent {}".format(self.n_agent))
            state = self.env_wrapper.reset()
            print(state.shape)
            print("called reset on agent {}".format(self.n_agent))
            self.ou_noise.reset()
            self.env_wrapper.env.resume_simulator()
            done = False
            angle_avg = []
            distance_avg = []
            while not done:
                # Pick the action source according to the agent's role.
                action = self.actor.get_action(state)
                if self.agent_type == "supervisor":
                    action = self.env_wrapper.env.get_supervised_action()
                elif self.agent_type == "exploration":
                    action = self.ou_noise.get_action(action, num_steps)
                    action = action.squeeze(0)
                else:
                    action = action.detach().cpu().numpy().flatten()
                next_state, reward, done = self.env_wrapper.step(action)
                # assumes state[0] is an angle and state[1:3] are x/y offsets
                # — TODO confirm against the env wrapper's state layout.
                angle_avg.append(state[0])
                distance_avg.append(math.hypot(state[1], state[2]))
                episode_reward += reward

                state = self.env_wrapper.normalise_state(state)
                reward = self.env_wrapper.normalise_reward(reward)

                self.exp_buffer.append((state, action, reward))

                # We need at least N steps in the experience buffer before we can compute Bellman
                # rewards and add an N-step experience to replay memory
                if len(self.exp_buffer) >= self.config['n_step_returns']:
                    state_0, action_0, reward_0 = self.exp_buffer.popleft()
                    discounted_reward = reward_0
                    gamma = self.config['discount_rate']
                    for (_, _, r_i) in self.exp_buffer:
                        discounted_reward += r_i * gamma
                        gamma *= self.config['discount_rate']
                    if not replay_queue.full():
                        replay_queue.put([
                            state_0, action_0, discounted_reward, next_state,
                            done, gamma
                        ])

                state = next_state

                if done or num_steps == self.max_steps:
                    print("agent {} done steps: {}/{}".format(
                        self.n_agent, num_steps, self.max_steps))
                    # add rest of experiences remaining in buffer
                    while len(self.exp_buffer) != 0:
                        #print("agent {} exp_buffer_len {}".format(self.n_agent, len(self.exp_buffer)))
                        state_0, action_0, reward_0 = self.exp_buffer.popleft()
                        discounted_reward = reward_0
                        gamma = self.config['discount_rate']
                        for (_, _, r_i) in self.exp_buffer:
                            #print("agent {} exp_buffer_len {}".format(self.n_agent, len(self.exp_buffer)))
                            discounted_reward += r_i * gamma
                            gamma *= self.config['discount_rate']
                        replay_queue.put([
                            state_0, action_0, discounted_reward, next_state,
                            done, gamma
                        ])
                    break

                num_steps += 1

            #print("agent {} finished if".format(self.n_agent))
            # Log metrics
            step = update_step.value
            if self.agent_type == "exploitation":
                self.logger.scalar_summary("agent/angle",
                                           np.rad2deg(np.mean(angle_avg)),
                                           step)
                self.logger.scalar_summary("agent/angle_var",
                                           np.rad2deg(np.var(angle_avg)), step)
                self.logger.scalar_summary("agent/distance",
                                           np.mean(distance_avg), step)
                self.logger.scalar_summary("agent/distance_var",
                                           np.var(distance_avg), step)
                observation_image = self.env_wrapper.env.get_current_observation_image(
                )
                # NOTE(review): 'image_summar' — confirm this is the project
                # Logger's real method name, not a typo for 'image_summary'.
                if num_steps == self.max_steps:
                    self.logger.image_summar("agent/observation_end",
                                             observation_image, num_steps)
                else:
                    # NOTE(review): discounted_reward may be unbound here if
                    # the episode ended before any N-step return was formed.
                    self.logger.image_summar(
                        "agent/observation_p_{:2.3f}".format(
                            discounted_reward), observation_image, num_steps)

            self.logger.scalar_summary("agent/reward", episode_reward, step)
            self.logger.scalar_summary("agent/episode_timing",
                                       time.time() - ep_start_time, step)

            # Saving agent
            if self.local_episode % self.num_episode_save == 0 or episode_reward > best_reward:
                if episode_reward > best_reward:
                    best_reward = episode_reward
                self.save(
                    f"local_episode_{self.local_episode}_reward_{best_reward:4f}"
                )
                print("reward is: {} step: {} ".format(episode_reward, step))

            rewards.append(episode_reward)
            if (self.agent_type == "exploration"
                    or self.agent_type == "supervisor"
                ) and self.local_episode % self.config['update_agent_ep'] == 0:
                self.update_actor_learner(learner_w_queue)

        # while not replay_queue.empty():
        #     replay_queue.get()

        # Save replay from the first agent only
        # if self.n_agent == 0:
        #    self.save_replay_gif()

        #print(f"Agent {self.n_agent} done.")

    def save(self, checkpoint_name):
        """Save the actor as a named checkpoint and again as <log_dir>/best.pt."""
        last_path = f"{self.log_dir}"
        process_dir = f"{self.log_dir}/agent_{self.n_agent}"
        if not os.path.exists(process_dir):
            os.makedirs(process_dir)
        if not os.path.exists(last_path):
            os.makedirs(last_path)
        model_fn = f"{process_dir}/{checkpoint_name}.pt"
        torch.save(self.actor, model_fn)
        model_fn = f"{last_path}/best.pt"
        torch.save(self.actor, model_fn)

    def save_replay_gif(self):
        """Roll out one episode, render each frame, and save them as a gif."""
        dir_name = "replay_render"
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)

        state = self.env_wrapper.reset()
        self.env_wrapper.env.resume_simulator()
        for step in range(self.max_steps):
            action = self.actor.get_action(state)
            action = action.cpu().detach().numpy()
            next_state, reward, done = self.env_wrapper.step(action)
            img = self.env_wrapper.render()
            plt.imsave(fname=f"{dir_name}/{step}.png", arr=img)
            state = next_state
            if done:
                break

        fn = f"{self.config['env']}-{self.config['model']}-{step}.gif"
        make_gif(dir_name, f"{self.log_dir}/{fn}")
        shutil.rmtree(dir_name, ignore_errors=False, onerror=None)
        print("fig saved to ", f"{self.log_dir}/{fn}")
Exemplo n.º 45
0
import time

from utils.network import check_internet_connected, start_subprocess_to_login_csust_bg
from utils.config import load_config
from utils.tips import info
from utils.logger import Logger

from utils.wifi import LinkWifi

# Watchdog loop: keep this machine connected to the 'csust-bg' campus WiFi,
# re-running the captive-portal login whenever internet connectivity drops.
wifi = LinkWifi()
logger = Logger('./out.log')

config = load_config('config/keep_network_connect.json')
logger.write(info(f"config: {config}\n"))

while True:
    # Check whether we are already connected to the internet
    if check_internet_connected() is False:

        # (Re)connect to the WiFi network
        if wifi.connect_wifi('csust-bg') == wifi.status('connected'):
            logger.write(info("connecting"))

            # Start a subprocess that performs the csust-bg portal login
            if start_subprocess_to_login_csust_bg(config['start_process'], config['check_max_line']) is True:
                logger.write(info("network has been connected!"))
            else:
                logger.write(info("connect failed!")) # the login run errored, or too many attempts were made

    # Sleep N seconds between connectivity checks
    time.sleep(config['check_per_second'])
Exemplo n.º 46
0
    # Memory
    memory = Memory(args.mem_size, state_dim, action_dim, args)

    # Algorithm
    drla = NASReptiLe(state_dim, action_dim, max_action, args)

    # Action noise
    a_noise = GaussianNoise(action_dim, sigma=args.gauss_sigma)

    # Logger
    fields = [
        "eval_score", "total_steps", "train_scores", "sampled_archi",
        "log_alphas"
    ]
    logger = Logger(args.output, fields)

    # Train
    ite = 0
    total_steps = 0
    while total_steps < args.max_steps:

        ite += 1

        fitness = []
        c_losses = []
        a_losses = []
        pop = drla.sample_pop(args.pop_size)

        # Update actors and critic
        if total_steps >= args.start_steps:
Exemplo n.º 47
0
def main():
    """Active-learning training driver for the causal-velocity model.

    Builds the (optionally grouped) train/validation dataloaders, seeds the
    active-learning dataset with one group of samples per input node, then
    trains for a fixed number of epochs with periodic train/validation logging.

    Relies on module-level state defined elsewhere in this file: args,
    save_folder, log_prior, optimizer, decoder, rel_rec, rel_send, scheduler,
    meta_file, and the dataset/sampler/logger classes.
    """
    # Train model
    # NOTE(review): best_val_loss/best_epoch are initialized but never updated
    # in this function — tracking appears to live in `logger` instead.
    best_val_loss = np.inf
    best_epoch = 0

    # AL grouped must use controlled training dataset
    train_dataset = OneGraphDataset.load_one_graph_data(
        'train_causal_vel_' + args.suffix,
        train_data_min_max=None,
        size=args.train_size,
        self_loop=args.self_loop,
        control=args.grouped,
        control_nodes=args.input_atoms,
        variations=args.variations,
        need_grouping=args.need_grouping)

    if args.val_grouped:
        # To see control loss, val and test should be grouped.
        # Validation data is normalized with the training set's min/max.
        valid_dataset = OneGraphDataset.load_one_graph_data(
            'valid_causal_vel_' + args.val_suffix,
            train_data_min_max=[train_dataset.mins, train_dataset.maxs],
            size=args.val_size,
            self_loop=args.self_loop,
            control=True,
            control_nodes=args.input_atoms,
            variations=args.val_variations,
            need_grouping=args.val_need_grouping)
        valid_sampler = RandomPytorchSampler(valid_dataset)
        valid_data_loader = DataLoader(valid_dataset,
                                       batch_size=args.val_bs,
                                       shuffle=False,
                                       sampler=valid_sampler)

        # test_data = load_one_graph_data(
        #     'test_'+args.val_suffix, size=args.test_size, self_loop=args.self_loop, control=True, control_nodes=args.input_atoms, variations=4)
        # test_sampler = RandomPytorchSampler(test_data)
        # test_data_loader = DataLoader(
        #     test_data, batch_size=args.val_bs, shuffle=False, sampler=test_sampler)
    else:
        valid_dataset = OneGraphDataset.load_one_graph_data(
            'valid_causal_vel_' + args.val_suffix,
            train_data_min_max=[train_dataset.mins, train_dataset.maxs],
            size=args.val_size,
            self_loop=args.self_loop,
            control=False)
        valid_data_loader = DataLoader(valid_dataset,
                                       batch_size=args.val_bs,
                                       shuffle=True)
        # test_data = load_one_graph_data(
        #     'test_'+args.val_suffix, size=args.val_size, self_loop=args.self_loop, control=False)
        # test_data_loader = DataLoader(
        #     test_data, batch_size=args.val_bs, shuffle=True)

    # NOTE(review): if args.sampler is neither 'random' nor 'uncertainty',
    # `sampler` stays unbound and sampler.sample(...) below raises NameError.
    if args.sampler == 'random':
        sampler = RandomDatasetSampler(train_dataset, args)
    elif args.sampler == 'uncertainty':
        sampler = MaximalEntropyDatasetSampler(train_dataset, args)
    else:
        print('Only random and uncertainty samplers are supported for now!')

    logger = Logger(save_folder)

    print('Doing initial validation before training...')
    val_control(args, log_prior, logger, save_folder, valid_data_loader, -1,
                decoder, rel_rec, rel_send, scheduler)

    data_idx = []
    nodes = []
    # Start with one group of each node
    for i in range(args.input_atoms):
        # group_size=args.variations
        new_data_idx = sampler.sample([i], args.variations, 200)
        uncertain_nodes = torch.LongTensor([i] * args.variations * 200).cuda()
        data_idx.append(new_data_idx)
        nodes.append(uncertain_nodes)

    data_idx = torch.cat(data_idx)
    nodes = torch.cat(nodes)
    al_train_dataset = ALIndexDataset(train_dataset, data_idx, nodes)

    # while len(al_train_dataset.idxs) < args.budget:
    for epoch in range(500):
        # epoch = int(len(al_train_dataset)/args.log_data_size)
        # Sampler batch_size is fixed to be #value variations for a node
        # control_nodes = sampler.criterion(decoder.rel_graph, k=args.topk)
        # new_data_idx = sampler.sample(
        #     control_nodes, args.variations, args.sample_num_groups)
        # uncertain_nodes = torch.LongTensor(
        #     control_nodes).repeat_interleave(args.variations*args.sample_num_groups).cuda()
        # al_train_dataset.update(new_data_idx, uncertain_nodes)

        # with open(os.path.join(save_folder, str(epoch)+'_queries.pt'), 'wb') as f:
        #     torch.save(
        #         [al_train_dataset.nodes, al_train_dataset.idxs], f)
        #     print('sampled nodes this episode',
        #           control_nodes, new_data_idx, len(al_train_dataset))

        # Only need to be a normal dataloader since the grouping and indexing are already implemented in al_train_dataset. Just make sure shuffle=False.
        train_data_loader = DataLoader(al_train_dataset,
                                       batch_size=args.train_bs,
                                       shuffle=False)

        # for j in range(10):
        # print('epoch and j', epoch, j)
        nll, nll_lasttwo, kl, mse, control_constraint_loss, lr, rel_graphs, rel_graphs_grad, a, b, c, d, e, f = train_control(
            args, log_prior, optimizer, save_folder, train_data_loader,
            valid_data_loader, decoder, rel_rec, rel_send, epoch)

        if epoch % args.train_log_freq == 0:
            logger.log('train',
                       decoder,
                       epoch,
                       nll,
                       nll_lasttwo,
                       kl=kl,
                       mse=mse,
                       control_constraint_loss=control_constraint_loss,
                       lr=lr,
                       rel_graphs=rel_graphs,
                       rel_graphs_grad=rel_graphs_grad,
                       msg_hook_weights=a,
                       nll_train_lasttwo=b,
                       nll_train_lasttwo_5=c,
                       nll_train_lasttwo_10=d,
                       nll_train_lasttwo__1=e,
                       nll_train_lasttwo_1=f)

        if epoch % args.val_log_freq == 0:
            _ = val_control(args, log_prior, logger, save_folder,
                            valid_data_loader, epoch, decoder, rel_rec,
                            rel_send, scheduler)
        scheduler.step()

    print("Optimization Finished!")
    print("Best Epoch: {:04d}".format(logger.best_epoch))
    if args.save_folder:
        print("Best Epoch: {:04d}".format(logger.best_epoch), file=meta_file)
        meta_file.flush()

    # NOTE(review): test_data_loader is never defined in this function (its
    # construction is commented out above), so this call raises NameError.
    test_control(test_data_loader)
    if meta_file is not None:
        print(save_folder)
        meta_file.close()
Exemplo n.º 48
0
 def __init__(self):
     """Create a logger named after the concrete subclass, writing to the app log path."""
     # NOTE(review): self.__class__.__name__ is always a non-empty string,
     # so the `or __name__` fallback can never be taken — dead code.
     self.logger = Logger(self.__class__.__name__ or __name__,
                          PathMgr.get_log_path())
Exemplo n.º 49
0
def mcapi_playback(name_interface):
    """Main function that calibrates the robot, get it into a default waiting position then launch
    the main control loop once the user has pressed the Enter key.

    Replays a pre-recorded TSID trajectory (joint positions, velocities and
    feed-forward torques) on the Solo12 robot or its PyBullet simulation,
    then ramps the robot down to the ground and shuts the interface down.

    Args:
        name_interface (string): name of the interface that is used to communicate with the robot

    Module-level globals used: SIMULATION, LOGGING, DT (defined elsewhere in this file).
    """
    name_replay = "/home/odri/git/thomasCbrs/log_eval/test_3/06_nl/"
    # name_replay = "/home/odri/git/thomasCbrs/log_eval/vmax_nl/"
    # replay_q = np.loadtxt(name_replay + "_q.dat", delimiter=" ")
    # replay_v = np.loadtxt(name_replay + "_v.dat", delimiter=" ")
    # replay_tau = np.loadtxt(name_replay + "_tau.dat", delimiter=" ")
    qtsid_full = np.load(name_replay + "qtsid.npy" , allow_pickle = True)
    vtsid_full = np.load(name_replay + "vtsid.npy" , allow_pickle = True)
    tau_ff = np.load(name_replay + "torques_ff.npy" , allow_pickle = True)
    # Drop the floating-base coordinates (7 config / 6 velocity entries) to
    # keep only the actuated joints; transpose to (time, joint).
    replay_q = qtsid_full[7:,:].transpose()
    replay_v = vtsid_full[6:,:].transpose()
    replay_tau = tau_ff.transpose()

    N_SIMULATION = replay_q.shape[0]

    # Default position after calibration
    # q_init = replay_q[0, 1:]
    q_init = replay_q[0, :]

    if SIMULATION:
        device = PyBulletSimulator()
        qc = None
    else:
        device = Solo12(name_interface, dt=DT)
        qc = QualisysClient(ip="140.93.16.160", body_id=0)

    if LOGGING:
        logger = Logger(device, qualisys=qc, logSize=N_SIMULATION)

    # Number of motors
    nb_motors = device.nb_motors

    # Initiate communication with the device and calibrate encoders
    if SIMULATION:
        device.Init(calibrateEncoders=True, q_init=q_init, envID=0,
                    use_flat_plane=True, enable_pyb_GUI=True, dt=DT)
    else:
        device.Init(calibrateEncoders=True, q_init=q_init)

        # Wait for Enter input before starting the control loop
        put_on_the_floor(device, q_init)

    # CONTROL LOOP ***************************************************
    t = 0.0
    t_max = (N_SIMULATION-1) * DT
    i = 0

    # Joint-level PD gains used for trajectory tracking
    P = 7 * np.ones(12)
    D = 0.5 * np.ones(12)
    # NOTE(review): q_des/v_des are never used below, and this zero tau_ff
    # shadows the torque array loaded above (replay_tau was already derived
    # from it) — presumably leftovers; confirm.
    q_des = np.zeros(12)
    v_des = np.zeros(12)
    tau_ff = np.zeros(12)

    while ((not device.hardware.IsTimeout()) and (t < t_max)):

        device.UpdateMeasurment()  # Retrieve data from IMU and Motion capture

        # Set desired quantities for the actuators
        device.SetDesiredJointPDgains(P, D)
        # device.SetDesiredJointPosition(replay_q[i, 1:])
        # device.SetDesiredJointVelocity(replay_v[i, 1:])
        # device.SetDesiredJointTorque(replay_tau[i, 1:])
        device.SetDesiredJointPosition(replay_q[i, :])
        device.SetDesiredJointVelocity(replay_v[i, :])
        device.SetDesiredJointTorque(replay_tau[i, :])

        # Call logger
        if LOGGING:
            logger.sample(device, qualisys=qc)

        # Send command to the robot
        device.SendCommand(WaitEndOfCycle=True)
        if ((device.cpt % 1000) == 0):
            device.Print()

        t += DT
        i += 1

    # DAMPING TO GET ON THE GROUND PROGRESSIVELY *********************
    # Pure joint damping (zero P gain, small D gain) lets the robot sink slowly.
    t = 0.0
    t_max = 2.5
    while ((not device.hardware.IsTimeout()) and (t < t_max)):

        device.UpdateMeasurment()  # Retrieve data from IMU and Motion capture

        # Set desired quantities for the actuators
        device.SetDesiredJointPDgains(np.zeros(12), 0.1 * np.ones(12))
        device.SetDesiredJointPosition(np.zeros(12))
        device.SetDesiredJointVelocity(np.zeros(12))
        device.SetDesiredJointTorque(np.zeros(12))

        # Send command to the robot
        device.SendCommand(WaitEndOfCycle=True)
        if ((device.cpt % 1000) == 0):
            device.Print()

        t += DT

    # FINAL SHUTDOWN *************************************************

    # Whatever happened we send 0 torques to the motors.
    device.SetDesiredJointTorque([0]*nb_motors)
    device.SendCommand(WaitEndOfCycle=True)

    if device.hardware.IsTimeout():
        print("Masterboard timeout detected.")
        print("Either the masterboard has been shut down or there has been a connection issue with the cable/wifi.")
    device.hardware.Stop()  # Shut down the interface between the computer and the master board

    # Save the logs of the Logger object
    if LOGGING:
        logger.saveAll()
        print("Log saved")

    if SIMULATION:
        # Disconnect the PyBullet server (also close the GUI)
        device.Stop()

    print("End of script")
    quit()
Exemplo n.º 50
0
class BaseDAO(object):
    """Base data-access object: shared MySQL connection handling, SQL value
    formatting, and query execution with exception logging."""

    def __init__(self):
        # Logger is named after the concrete DAO subclass.
        self.logger = Logger(self.__class__.__name__ or __name__,
                             PathMgr.get_log_path())

    @staticmethod
    def get_connection():
        """Open a fresh MySQL connection using the application's DB config."""
        cfg = ConfigMgr.get_db_config()
        return mysql.connector.connect(host=cfg['host'],
                                       user=cfg['user'],
                                       password=cfg['password'],
                                       database=cfg['database'])

    @staticmethod
    def python_value_to_sql_value(val):
        """Render a Python value as a SQL literal fragment.

        Floats are formatted with 5 decimal places, None becomes 'null',
        and everything else is passed through str().
        """
        if val is None:
            return 'null'
        return '{:.5f}'.format(val) if type(val) is float else str(val)

    @staticmethod
    def mysql_format(template, *args):
        """Fill a '{}'-style template with SQL-rendered argument values."""
        return template.format(*map(BaseDAO.python_value_to_sql_value, args))

    def select(self, query, cursor=None):
        """Run a SELECT and return all rows; returns None if the query fails.

        When no cursor is supplied, a dedicated connection is opened and
        closed here; a caller-supplied cursor is left for the caller to manage.
        """
        conn = None
        if cursor is None:
            conn = BaseDAO.get_connection()
            cursor = conn.cursor()
        try:
            cursor.execute(query)
            return cursor.fetchall()
        except Exception as e:
            self.logger.exception(
                "Query:{}, error message: {}, Stack Trace: {}".format(
                    query, str(e), traceback.format_exc()))
        finally:
            if conn:
                conn.close()

    def execute_query(self, query, cursor=None):
        """Execute a single write query, committing only connections opened here.

        Duplicate-key violations (IntegrityError) are deliberately ignored;
        other failures are logged and swallowed.
        """
        conn = None
        if cursor is None:
            conn = BaseDAO.get_connection()
            cursor = conn.cursor()
        try:
            cursor.execute(query)
            if conn:
                conn.commit()
        except mysql.connector.IntegrityError:
            pass
        except Exception as e:
            self.logger.exception(
                "Query:{}, error message: {}, Stack Trace: {}".format(
                    query, str(e), traceback.format_exc()))
        finally:
            if conn:
                conn.close()

    def execute_query_list(self, query_list):
        """Execute several queries on one connection with a single final commit."""
        conn = BaseDAO.get_connection()
        cursor = conn.cursor()
        current_query = None
        try:
            for current_query in query_list:
                cursor.execute(current_query)
            conn.commit()
        except Exception as e:
            self.logger.exception(
                "Query:{}, error message: {}, Stack Trace: {}".format(
                    current_query, str(e), traceback.format_exc()))
        finally:
            conn.close()
Exemplo n.º 51
0
    if level == "debug":
        return logging.DEBUG
    if level == "info":
        return logging.INFO
    if level == "warn":
        return logging.WARN
    if level == "error":
        return logging.ERROR


INIT_FILE_PATH = "config/init.ini"
NUMBER_OF_FRAMES = -1
CAMERA_NAME = socket.gethostname()

app_conf = AppConf(INIT_FILE_PATH)
logger = Logger(level=get_conf_log_level(app_conf.log_level))

cap = cv2.VideoCapture(0)
count = NUMBER_OF_FRAMES
with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
    while count > 0 or NUMBER_OF_FRAMES == -1:
        try:
            # Capture frame-by-frame
            ret, frame = cap.read()
            if ret:
                cv2.imshow('frame', frame)
                vi = V3ioImage(logger, frame, CAMERA_NAME)
                img_json = vi.image_json
                logger.debug(img_json)
                future = {
                    executor.submit(
Exemplo n.º 52
0
def main_worker(gpu_idx, configs):
    """Per-process training entry point (one process per GPU when distributed).

    Sets up the distributed process group, logging/TensorBoard (master node
    only), builds the model/optimizer/scheduler (optionally restored from
    checkpoints), then runs the train / validate / checkpoint loop.

    Arguments:
    - gpu_idx: GPU index assigned to this process, or None for CPU
    - configs: namespace holding all run settings (mutated in place here)
    """
    configs.gpu_idx = gpu_idx
    configs.device = torch.device('cpu' if configs.gpu_idx is None else 'cuda:{}'.format(configs.gpu_idx))

    if configs.distributed:
        if configs.dist_url == "env://" and configs.rank == -1:
            configs.rank = int(os.environ["RANK"])
        if configs.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            configs.rank = configs.rank * configs.ngpus_per_node + gpu_idx

        dist.init_process_group(backend=configs.dist_backend, init_method=configs.dist_url,
                                world_size=configs.world_size, rank=configs.rank)
        # Gradient-accumulation subdivisions, derived from a fixed budget of 64
        configs.subdivisions = int(64 / configs.batch_size / configs.ngpus_per_node)
    else:
        configs.subdivisions = int(64 / configs.batch_size)

    # Master node = non-distributed run, or rank 0 within each node
    configs.is_master_node = (not configs.distributed) or (
            configs.distributed and (configs.rank % configs.ngpus_per_node == 0))

    # Only the master node writes logs and TensorBoard summaries
    if configs.is_master_node:
        logger = Logger(configs.logs_dir, configs.saved_fn)
        logger.info('>>> Created a new logger')
        logger.info('>>> configs: {}'.format(configs))
        tb_writer = SummaryWriter(log_dir=os.path.join(configs.logs_dir, 'tensorboard'))
    else:
        logger = None
        tb_writer = None

    # model
    model = create_model(configs)

    # load weight from a checkpoint
    if configs.pretrained_path is not None:
        assert os.path.isfile(configs.pretrained_path), "=> no checkpoint found at '{}'".format(configs.pretrained_path)
        model.load_state_dict(torch.load(configs.pretrained_path))
        if logger is not None:
            logger.info('loaded pretrained model at {}'.format(configs.pretrained_path))

    # resume weights of model from a checkpoint
    if configs.resume_path is not None:
        assert os.path.isfile(configs.resume_path), "=> no checkpoint found at '{}'".format(configs.resume_path)
        model.load_state_dict(torch.load(configs.resume_path))
        if logger is not None:
            logger.info('resume training model from checkpoint {}'.format(configs.resume_path))

    # Data Parallel
    model = make_data_parallel(model, configs)

    # Make sure to create optimizer after moving the model to cuda
    optimizer = create_optimizer(configs, model)
    lr_scheduler = create_lr_scheduler(optimizer, configs)
    configs.step_lr_in_epoch = True if configs.lr_type in ['multi_step'] else False

    # resume optimizer, lr_scheduler from a checkpoint
    # (stored in a sibling 'Utils_*' file next to the 'Model_*' checkpoint)
    if configs.resume_path is not None:
        utils_path = configs.resume_path.replace('Model_', 'Utils_')
        assert os.path.isfile(utils_path), "=> no checkpoint found at '{}'".format(utils_path)
        utils_state_dict = torch.load(utils_path, map_location='cuda:{}'.format(configs.gpu_idx))
        optimizer.load_state_dict(utils_state_dict['optimizer'])
        lr_scheduler.load_state_dict(utils_state_dict['lr_scheduler'])
        configs.start_epoch = utils_state_dict['epoch'] + 1

    if configs.is_master_node:
        num_parameters = get_num_parameters(model)
        logger.info('number of trained parameters of the model: {}'.format(num_parameters))

    if logger is not None:
        logger.info(">>> Loading dataset & getting dataloader...")
    # Create dataloader
    train_dataloader, train_sampler = create_train_dataloader(configs)
    if logger is not None:
        logger.info('number of batches in training set: {}'.format(len(train_dataloader)))

    # Evaluation-only mode: compute mAP and return without training
    if configs.evaluate:
        val_dataloader = create_val_dataloader(configs)
        precision, recall, AP, f1, ap_class = evaluate_mAP(val_dataloader, model, configs, None)
        print('Evaluate - precision: {}, recall: {}, AP: {}, f1: {}, ap_class: {}'.format(precision, recall, AP, f1,
                                                                                          ap_class))
        print('mAP {}'.format(AP.mean()))
        return

    for epoch in range(configs.start_epoch, configs.num_epochs + 1):
        if logger is not None:
            logger.info('{}'.format('*-' * 40))
            logger.info('{} {}/{} {}'.format('=' * 35, epoch, configs.num_epochs, '=' * 35))
            logger.info('{}'.format('*-' * 40))
            logger.info('>>> Epoch: [{}/{}]'.format(epoch, configs.num_epochs))

        if configs.distributed:
            # Reshuffle shards consistently across processes each epoch
            train_sampler.set_epoch(epoch)
        # train for one epoch
        train_one_epoch(train_dataloader, model, optimizer, lr_scheduler, epoch, configs, logger, tb_writer)
        if not configs.no_val:
            val_dataloader = create_val_dataloader(configs)
            print('number of batches in val_dataloader: {}'.format(len(val_dataloader)))
            precision, recall, AP, f1, ap_class = evaluate_mAP(val_dataloader, model, configs, logger)
            val_metrics_dict = {
                'precision': precision.mean(),
                'recall': recall.mean(),
                'AP': AP.mean(),
                'f1': f1.mean(),
                'ap_class': ap_class.mean()
            }
            if tb_writer is not None:
                tb_writer.add_scalars('Validation', val_metrics_dict, epoch)

        # Save checkpoint
        if configs.is_master_node and ((epoch % configs.checkpoint_freq) == 0):
            model_state_dict, utils_state_dict = get_saved_state(model, optimizer, lr_scheduler, epoch, configs)
            save_checkpoint(configs.checkpoints_dir, configs.saved_fn, model_state_dict, utils_state_dict, epoch)

        # Epoch-wise schedulers step here; step-wise ones step inside train_one_epoch
        if not configs.step_lr_in_epoch:
            lr_scheduler.step()
            if tb_writer is not None:
                tb_writer.add_scalar('LR', lr_scheduler.get_lr()[0], epoch)

    if tb_writer is not None:
        tb_writer.close()
    if configs.distributed:
        cleanup()
Exemplo n.º 53
0
from adapters.data_adapter import IDataAdapter
from adapters.hist_data_adapter import HistDataAdapter
from adapters.influx_sensordata import InfluxSensorData
from models.arima import train_or_load_ARIMA
from models.autoarima import train_or_load_autoARIMA
from models.expsmoothing import train_or_load_expSmoothing
from models.lstm import train_or_load_LSTM
from models.modelholder import ModelHolder
from preprocessing.imputing import impute_simple_imputer
from preprocessing.moving_average import moving_average
from utils.config import default_config
from utils.logger import Logger
from utils.threading import to_thread
from utils.timer import Timer

# Module-level logger and script timer (Timer presumably starts on
# construction — confirm against utils.timer).
logger: Logger = Logger(module_name="Model 1")
script_timer = Timer()

# structure of time series: rows: instances, cols: variables, depth: series of values


def print_header() -> None:
    """
		Prints a header for a more pleasant console user experience. Not that this is at all important,
		but i felt like it. Don't try to change my mind.
	"""
    logger.info(
        "##############################################################")
    logger.info(
        "#       Datascience in Techno-Socio-Economic Systems         #")
    logger.info(
Exemplo n.º 54
0
    def __init__(self):
        """Build the full training configuration: paths, hyperparameters,
        model, optimizer/scheduler, transforms, datasets, and dataloaders."""
        ##The top config
        #self.data_root = '/media/hhy/data/USdata/MergePhase1/test_0.3'
        #self.log_dir = '/media/hhy/data/code_results/MILs/MIL_H_Attention'

        self.root = '/remote-home/my/Ultrasound_CV/data/Ruijin/clean'
        self.log_dir = '/remote-home/my/hhy/Ultrasound_MIL/experiments/PLN1/base/fold4'
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
        ##training config
        self.lr = 1e-4
        self.epoch = 50
        self.resume = -1          # >0 resumes from that checkpoint (see NOTE below)
        self.batch_size = 1
        self.net = Attention()
        self.net.cuda()

        self.optimizer = Adam(self.net.parameters(), lr=self.lr)
        self.lrsch = torch.optim.lr_scheduler.MultiStepLR(
            self.optimizer, milestones=[10, 30, 50, 70], gamma=0.5)

        self.logger = Logger(self.log_dir)
        self.train_transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.RandomResizedCrop((224, 224)),
            transforms.RandomHorizontalFlip(0.5),
            transforms.RandomVerticalFlip(0.5),
            transforms.ColorJitter(0.25, 0.25, 0.25, 0.25),
            transforms.ToTensor()
        ])
        self.test_transform = transforms.Compose(
            [transforms.Resize((224, 224)),
             transforms.ToTensor()])

        # Label column name in the dataset (surgical lymph-node status;
        # 0 = no metastasis, 1 = metastasis). Must match the data files.
        self.label_name = "手术淋巴结情况(0未转移;1转移)"
        # 5-fold split: folds 0-3 train, fold 4 test
        self.trainbag = RuijinBags(self.root, [0, 1, 2, 3],
                                   self.train_transform,
                                   label_name=self.label_name)
        self.testbag = RuijinBags(self.root, [4],
                                  self.test_transform,
                                  label_name=self.label_name)

        # Class-balancing weights: positives weighted (1 - pos_ratio), negatives pos_ratio.
        # NOTE(review): train_weight is computed but unused — the
        # WeightedRandomSampler that consumed it is commented out below.
        train_label_list = list(
            map(lambda x: int(x['label']), self.trainbag.patient_info))
        pos_ratio = sum(train_label_list) / len(train_label_list)
        print(pos_ratio)
        train_weight = [(1 - pos_ratio) if x > 0 else pos_ratio
                        for x in train_label_list]

        # self.train_sampler = WeightedRandomSampler(weights=train_weight, num_samples=len(self.trainbag))
        self.train_loader = DataLoader(self.trainbag,
                                       batch_size=self.batch_size,
                                       num_workers=8,
                                       shuffle=True)
        self.val_loader = DataLoader(self.testbag,
                                     batch_size=self.batch_size,
                                     shuffle=False,
                                     num_workers=8)

        if self.resume > 0:
            # NOTE(review): self.loss is never assigned in this __init__, so
            # resuming (resume > 0) raises AttributeError on this call — confirm.
            self.net, self.optimizer, self.lrsch, self.loss, self.global_step = self.logger.load(
                self.net, self.optimizer, self.lrsch, self.loss, self.resume)
        else:
            self.global_step = 0

        # self.trainer = MTTrainer(self.net, self.optimizer, self.lrsch, self.loss, self.train_loader, self.val_loader, self.logger, self.global_step, mode=2)
        self.trainer = MILTrainer(self.net, self.optimizer, self.lrsch, None,
                                  self.train_loader, self.val_loader,
                                  self.logger, self.global_step)
Exemplo n.º 55
0
def main(args):
    """Train the handwriting-detection classifier with Chainer.

    Builds train/validation datasets and iterators, wires up the model,
    optimizer, trainer extensions (snapshots, TensorBoard evaluation,
    logging, LR decay, progress bar), then runs the training loop.

    Arguments:
    - args: parsed command-line namespace (paths, image size, batch size,
      intervals, GPU id, resume flag, etc.)
    """
    args = prepare_log_dir(args)

    # set dtype for training
    chainer.global_config.dtype = args.dtype

    train_dataset = BaseImageDataset(
        args.train_file,
        args.image_size,
        root=os.path.dirname(args.train_file),
        dtype=chainer.get_dtype(),
    )

    validation_dataset = BaseImageDataset(
        args.val_file,
        args.image_size,
        root=os.path.dirname(args.val_file),
        dtype=chainer.get_dtype(),
    )

    train_iter = MultiprocessIterator(train_dataset, batch_size=args.batch_size, shuffle=True)
    validation_iter = MultiprocessIterator(validation_dataset, batch_size=args.batch_size, repeat=False)

    net = HandwritingNet()
    # Classifier wraps the net with a loss/accuracy head keyed on 'has_text'
    model = L.Classifier(net, label_key='has_text')

    tensorboard_handle = SummaryWriter(log_dir=args.log_dir)

    optimizer = Adam(alpha=args.learning_rate)
    optimizer.setup(model)
    if args.save_gradient_information:
        optimizer.add_hook(
            TensorboardGradientPlotter(tensorboard_handle, args.log_interval),
        )

    # log train information everytime we encouter a new epoch or args.log_interval iterations have been done
    log_interval_trigger = (
        lambda trainer:
        (trainer.updater.is_new_epoch or trainer.updater.iteration % args.log_interval == 0)
        and trainer.updater.iteration > 0
    )

    updater = StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = Trainer(updater, (args.num_epoch, 'epoch'), out=args.log_dir)

    # Run metadata that gets written into the log once (see backup_train_config)
    data_to_log = {
        'log_dir': args.log_dir,
        'image_size': args.image_size,
        # 'num_layers': args.num_layers,
        'keep_aspect_ratio': train_dataset.keep_aspect_ratio,
        'net': get_import_info(net),
    }

    for argument in filter(lambda x: not x.startswith('_'), dir(args)):
        data_to_log[argument] = getattr(args, argument)

    def backup_train_config(stats_cpu):
        # Forward scalar stats to TensorBoard; on the first logged iteration,
        # also merge the run metadata into the stats dict that gets persisted.
        iteration = stats_cpu.pop('iteration')
        epoch = stats_cpu.pop('epoch')
        elapsed_time = stats_cpu.pop('elapsed_time')

        for key, value in stats_cpu.items():
            tensorboard_handle.add_scalar(key, value, iteration)

        if iteration == args.log_interval:
            stats_cpu.update(data_to_log)

        stats_cpu.update({
            "epoch": epoch,
            "iteration": iteration,
            "elapsed_time": elapsed_time,
        })

    trainer.extend(
        extensions.snapshot_object(net, net.__class__.__name__ + '_{.updater.iteration}.npz'),
        trigger=lambda trainer: trainer.updater.is_new_epoch or trainer.updater.iteration % args.snapshot_interval == 0,
    )

    # Trainer snapshot auto-reloads when --resume is given
    trainer.extend(
        extensions.snapshot(filename='trainer_snapshot', autoload=args.resume is not None),
        trigger=(args.snapshot_interval, 'iteration')
    )

    trainer.extend(
        TensorboardEvaluator(
            validation_iter,
            model,
            device=args.gpu,
            tensorboard_handle=tensorboard_handle
        ),
        trigger=(args.test_interval, 'iteration'),
    )

    logger = Logger(
        os.path.dirname(os.path.realpath(__file__)),
        args.log_dir,
        postprocess=backup_train_config,
        trigger=log_interval_trigger,
        exclusion_filters=['*logs*', '*.pyc', '__pycache__', '.git*'],
        resume=args.resume is not None,
    )

    trainer.extend(logger, trigger=log_interval_trigger)
    trainer.extend(
        extensions.PrintReport(
            ['epoch', 'iteration', 'main/loss', 'main/accuracy', 'validation/main/accuracy'],
            log_report='Logger',
        ),
        trigger=log_interval_trigger,
    )

    # Decay Adam's alpha by 10x every 10 epochs
    trainer.extend(extensions.ExponentialShift('alpha', 0.1, optimizer=optimizer), trigger=(10, 'epoch'))
    trainer.extend(extensions.ProgressBar(update_interval=10))

    trainer.run()
Exemplo n.º 56
0
class Config(object):
    '''
    This config is for single modality BM classification.

    Instantiating it builds the whole training setup as attributes:
    network, optimizer, LR scheduler, logger, data transforms, datasets,
    data loaders and the MIL trainer itself.
    '''
    def __init__(self):
        ## Top-level paths
        # Previous local-machine paths, kept for reference:
        # self.data_root = '/media/hhy/data/USdata/MergePhase1/test_0.3'
        # self.log_dir = '/media/hhy/data/code_results/MILs/MIL_H_Attention'
        self.root = '/remote-home/my/Ultrasound_CV/data/Ruijin/clean'
        self.log_dir = '/remote-home/my/hhy/Ultrasound_MIL/experiments/PLN1/base/fold4'
        # Race-free equivalent of the exists()+makedirs() pair.
        os.makedirs(self.log_dir, exist_ok=True)

        ## Training config
        self.lr = 1e-4
        self.epoch = 50
        self.resume = -1      # checkpoint epoch to resume from; <= 0 trains from scratch
        self.batch_size = 1
        # Fix: `self.loss` was read in the resume branch below without ever
        # being assigned (AttributeError whenever resume > 0). Initialize it
        # to None, consistent with the None loss handed to MILTrainer.
        self.loss = None
        self.net = Attention()
        self.net.cuda()

        self.optimizer = Adam(self.net.parameters(), lr=self.lr)
        self.lrsch = torch.optim.lr_scheduler.MultiStepLR(
            self.optimizer, milestones=[10, 30, 50, 70], gamma=0.5)

        self.logger = Logger(self.log_dir)

        # NOTE(review): Resize to 224x224 immediately followed by
        # RandomResizedCrop to the same size looks redundant, but the exact
        # augmentation pipeline is preserved here — confirm before changing.
        self.train_transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.RandomResizedCrop((224, 224)),
            transforms.RandomHorizontalFlip(0.5),
            transforms.RandomVerticalFlip(0.5),
            transforms.ColorJitter(0.25, 0.25, 0.25, 0.25),
            transforms.ToTensor()
        ])
        self.test_transform = transforms.Compose(
            [transforms.Resize((224, 224)),
             transforms.ToTensor()])

        # Label column name (Chinese): surgical lymph-node status,
        # 0 = no metastasis, 1 = metastasis. Must match the dataset exactly.
        self.label_name = "手术淋巴结情况(0未转移;1转移)"
        # 5-fold split: folds 0-3 train, fold 4 test.
        self.trainbag = RuijinBags(self.root, [0, 1, 2, 3],
                                   self.train_transform,
                                   label_name=self.label_name)
        self.testbag = RuijinBags(self.root, [4],
                                  self.test_transform,
                                  label_name=self.label_name)

        train_label_list = [int(x['label']) for x in self.trainbag.patient_info]
        pos_ratio = sum(train_label_list) / len(train_label_list)
        print(pos_ratio)
        # Inverse-frequency class weights; currently unused because the
        # weighted sampler below is disabled.
        train_weight = [(1 - pos_ratio) if x > 0 else pos_ratio
                        for x in train_label_list]

        # self.train_sampler = WeightedRandomSampler(weights=train_weight, num_samples=len(self.trainbag))
        self.train_loader = DataLoader(self.trainbag,
                                       batch_size=self.batch_size,
                                       num_workers=8,
                                       shuffle=True)
        self.val_loader = DataLoader(self.testbag,
                                     batch_size=self.batch_size,
                                     shuffle=False,
                                     num_workers=8)

        if self.resume > 0:
            self.net, self.optimizer, self.lrsch, self.loss, self.global_step = self.logger.load(
                self.net, self.optimizer, self.lrsch, self.loss, self.resume)
        else:
            self.global_step = 0

        self.trainer = MILTrainer(self.net, self.optimizer, self.lrsch, None,
                                  self.train_loader, self.val_loader,
                                  self.logger, self.global_step)

    def update(self, dicts):
        """Overwrite config attributes from a ``{name: value}`` mapping."""
        for k, v in dicts.items():
            setattr(self, k, v)

    @staticmethod
    def auto_argparser(cfg, description=None):
        """Generate an argparser from the config automatically (experimental).

        Adds one CLI option per config attribute, parses sys.argv, applies
        the parsed values back onto *cfg* and returns it.
        """
        parser = ArgumentParser(description=description)
        add_args(parser, cfg)
        parsed_args = parser.parse_args()
        update_cfg(cfg, parsed_args)
        return cfg
Exemplo n.º 57
0
#!/usr/bin/env python3

import os
from utils.logger import Logger
from utils.config import Config
from utils.environment import Env
from utils.arguments import get_arguments

__appName__ = 'TrailerTech'
__author__ = 'JsAddiction'
__version__ = '0.1.0'
__description__ = 'Download Trailers for your movie library.'

# Both runtime artifacts live one directory above this file's directory.
_BASE_DIR = os.path.dirname(os.path.dirname(__file__))
CONFIG_PATH = os.path.join(_BASE_DIR, 'settings.ini')
LOG_PATH = os.path.join(_BASE_DIR, 'TrailerTech.log')

# Module-level singletons shared by the application.
env = Env()
args = get_arguments(__appName__, __description__, __version__)
config = Config(CONFIG_PATH)
logger = Logger(LOG_PATH, config.log_level, config.log_to_file, quiet=args.quiet)
Exemplo n.º 58
0
from asyncio import sleep, gather, create_task
from bs4 import BeautifulSoup as bs4
from random import SystemRandom

import models.errors as errors
from models.pydantic import BookInfo
from utils.logger import Logger

logger = Logger.get(__file__)

class WebPage:
    """
    Представляет собой Web-страницу
    """

    __headers = {
        'Host' : 'elibrary.asu.ru',
        'Connection' : 'close',
        'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36',
        'Accept' : 'image/webp,image/apng,image/*,*/*;q=0.8',
        'Accept-Encoding' : 'gzip, deflate',
        'Accept-Language' : 'en-GB,en;q=0.9,ru-RU;q=0.8,ru;q=0.7,en-US;q=0.6'
    }

    def __init__(self, session, link):
        """
        session = aiohttp.Session
        link = link for book page
        """

        self.session = session
Exemplo n.º 59
0
def train(args):
    """Train a watermark encoder/decoder pair against a discriminator.

    The encoder embeds a random bit string of ``args.info_size`` bits into
    each image, an optional noise layer attacks the encoded image, the
    decoder recovers the bits, and a discriminator supplies an adversarial
    loss. Per-iteration and per-epoch metrics go to *logger*; checkpoints
    and sample images are written under ``ckpt/<log_name>``.

    Arguments:
    - args: parsed CLI namespace. Fields read here: gpu_id, input_size,
      logger_name, validate_batch, use_noise, info_size, relative_loss,
      adversarial_loss_constant, encoder_loss_constant,
      decoder_loss_constant, epochs.
    """
    # Configuration
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    input_height, input_width = args.input_size

    logger = Logger(log_root='logs/', name=args.logger_name)

    # Record the full run configuration in the log.
    for k, v in args.__dict__.items():
        logger.add_text('configuration', "{}: {}".format(k, v))

    # Dataset
    train_loader, val_loader = get_data_loaders(args)
    batchs_in_val = math.ceil(len(val_loader.dataset) / args.validate_batch)
    print("Train set size:", len(train_loader.dataset))
    print("Val set size:", len(val_loader.dataset))

    # Network
    if args.use_noise is True:
        noise_layers = {
            'crop': '((0.4,0.55),(0.4,0.55))',
            'cropout': '((0.25,0.35),(0.25,0.35))',
            'dropout': '(0.25,0.35)',
            'jpeg': '()',
            'resize': '(0.4,0.6)',
        }  # This is a combined noise used in the paper
    else:
        noise_layers = dict()
    encoder = Encoder(input_height, input_width, args.info_size)
    noiser = Noiser(noise_layers, torch.device('cuda'))
    decoder = Decoder(args.info_size)
    discriminator = Discriminator()
    # NOTE(review): mixes .cuda() with the `device` computed above; on a
    # CPU-only machine these calls fail — confirm CUDA is a hard requirement.
    encoder.cuda()
    noiser.cuda()
    decoder.cuda()
    discriminator.cuda()

    # Optimizers: separate instances so generator and discriminator
    # parameter updates never mix.
    optimizer_enc = torch.optim.Adam(encoder.parameters())
    optimizer_dec = torch.optim.Adam(decoder.parameters())
    optimizer_dis = torch.optim.Adam(discriminator.parameters())

    # Training output directories
    dir_save = 'ckpt/{}'.format(logger.log_name)
    os.makedirs(dir_save, exist_ok=True)
    os.makedirs(dir_save + '/images/', exist_ok=True)
    os.makedirs(dir_save + '/models/', exist_ok=True)
    training_losses = defaultdict(AverageLoss)

    # A fixed message and fixed validation batch, reused every epoch so the
    # qualitative results stay comparable across epochs.
    info_fix = torch.randint(0, 2, size=(100, args.info_size)).to(device, dtype=torch.float32)
    image_fix = None
    for image, _ in val_loader:
        image_fix = image.cuda()  # 100 images for validate, the first batch
        break
    global_step = 1
    for epoch in range(1, args.epochs + 1):

        # Train one epoch
        for image, _ in train_loader:
            image = image.cuda()
            batch_size = image.shape[0]
            # Fresh random message for every batch.
            info = torch.randint(0, 2, size=(batch_size, args.info_size)).to(device, dtype=torch.float32)

            encoder.train()
            noiser.train()
            decoder.train()
            discriminator.train()
            # ---------------- Train the discriminator -----------------------------
            optimizer_dis.zero_grad()
            # train on cover
            y_real = torch.ones(batch_size, 1).cuda()
            y_fake = torch.zeros(batch_size, 1).cuda()

            d_on_cover = discriminator(image)
            encoded_image = encoder(image, info)
            # detach(): the discriminator step must not backprop into the encoder.
            d_on_encoded = discriminator(encoded_image.detach())

            if args.relative_loss:
                # Relativistic GAN loss (RaGAN): compare each side against
                # the mean score of the other side.
                d_loss_on_cover = F.binary_cross_entropy_with_logits(d_on_cover - torch.mean(d_on_encoded),
                                                                     y_real)
                d_loss_on_encoded = F.binary_cross_entropy_with_logits(d_on_encoded - torch.mean(d_on_cover),
                                                                       y_fake)
                d_loss = d_loss_on_cover + d_loss_on_encoded
            else:
                d_loss_on_cover = F.binary_cross_entropy_with_logits(d_on_cover, y_real)
                d_loss_on_encoded = F.binary_cross_entropy_with_logits(d_on_encoded, y_fake)

                d_loss = d_loss_on_cover + d_loss_on_encoded

            d_loss.backward()
            optimizer_dis.step()

            # --------------Train the generator (encoder-decoder) ---------------------
            optimizer_enc.zero_grad()
            optimizer_dec.zero_grad()

            d_on_cover = discriminator(image)
            encoded_image = encoder(image, info)
            noised_and_cover = noiser([encoded_image, image])
            noised_image = noised_and_cover[0]
            decoded_info = decoder(noised_image)
            d_on_encoded = discriminator(encoded_image)
            if args.relative_loss:
                g_loss_adv = \
                    (F.binary_cross_entropy_with_logits(d_on_encoded - torch.mean(d_on_cover), y_real) +
                     F.binary_cross_entropy_with_logits(d_on_cover - torch.mean(d_on_encoded), y_fake)) * 0.5
                g_loss_enc = F.mse_loss(encoded_image, image)
                g_loss_dec = F.mse_loss(decoded_info, info)
            else:
                g_loss_adv = F.binary_cross_entropy_with_logits(d_on_encoded, y_real)
                g_loss_enc = F.mse_loss(encoded_image, image)
                g_loss_dec = F.mse_loss(decoded_info, info)
            g_loss = args.adversarial_loss_constant * g_loss_adv + \
                     args.encoder_loss_constant * g_loss_enc + \
                     args.decoder_loss_constant * g_loss_dec

            g_loss.backward()
            optimizer_enc.step()
            optimizer_dec.step()

            # Bit error rate: round decoded logits to {0,1} and compare.
            decoded_rounded = decoded_info.detach().cpu().numpy().round().clip(0, 1)
            bitwise_avg_err = \
                np.sum(np.abs(decoded_rounded - info.detach().cpu().numpy())) / \
                (batch_size * info.shape[1])

            losses = {
                'g_loss': g_loss.item(),
                'g_loss_enc': g_loss_enc.item(),
                'g_loss_dec': g_loss_dec.item(),
                'bitwise_avg_error': bitwise_avg_err,
                'g_loss_adv': g_loss_adv.item(),
                'd_loss_on_cover': d_loss_on_cover.item(),
                'd_loss_on_encoded': d_loss_on_encoded.item(),
                'd_loss': d_loss.item()
            }
            if logger:
                for name, loss in losses.items():
                    logger.add_scalar(name + '_iter', loss, global_step)
                    training_losses[name].update(loss)
            global_step += 1

        if logger:
            # NOTE(review): training_losses is never reset, so these epoch
            # averages are running averages over the whole run — confirm
            # that is intended.
            logger.add_scalar('d_loss_epoch', training_losses['d_loss'].avg, epoch)
            logger.add_scalar('g_loss_epoch', training_losses['g_loss'].avg, epoch)

        # Validate each epoch on one randomly chosen full batch.
        info_random = torch.randint(0, 2, size=(100, args.info_size)).to(device, dtype=torch.float32)
        image_random = None
        # Fix: clamp the upper bound — with fewer than two validation
        # batches, randint(0, batchs_in_val - 2) raised ValueError
        # (empty range).
        choice = random.randint(0, max(0, batchs_in_val - 2))
        # print(choice)
        for i, (image, _) in enumerate(val_loader):
            if i < choice:
                continue
            if image.shape[0] < 100:
                continue
            image_random = image.cuda()  # Grub the first batch
            break
        # NOTE(review): image_random stays None if no validation batch has
        # >= 100 images — the forward pass below would then fail; confirm
        # validate_batch is always >= 100.

        encoder.eval()
        noiser.eval()
        decoder.eval()
        discriminator.eval()

        # Fix: validation never backpropagates, so run the forward passes
        # under no_grad to avoid building the autograd graph (saves GPU
        # memory; computed values are unchanged).
        with torch.no_grad():
            encoded_image_random = encoder(image_random, info_random)
            noised_and_cover_random = noiser([encoded_image_random, image_random])
            noised_image_random = noised_and_cover_random[0]
            decoded_info_random = decoder(noised_image_random)

            encoded_image_fix = encoder(image_fix, info_fix)
            noised_and_cover_fix = noiser([encoded_image_fix, image_fix])
            noised_image_fix = noised_and_cover_fix[0]
            decoded_info_fix = decoder(noised_image_fix)

        decoded_rounded_fix = decoded_info_fix.detach().cpu().numpy().round().clip(0, 1)
        bitwise_avg_err_fix = \
            np.sum(np.abs(decoded_rounded_fix - info_fix.detach().cpu().numpy())) / \
            (100 * info_fix.shape[1])

        decoded_rounded_random = decoded_info_random.detach().cpu().numpy().round().clip(0, 1)
        bitwise_avg_err_random = \
            np.sum(np.abs(decoded_rounded_random - info_random.detach().cpu().numpy())) / \
            (100 * info_random.shape[1])

        stack_image_random = exec_val(image_random, encoded_image_random,
                                      os.path.join(dir_save, 'images', 'random_epoch{:0>3d}.png'.format(epoch)))
        stack_image_fix = exec_val(image_fix, encoded_image_fix,
                                   os.path.join(dir_save, 'images', 'fix_epoch{:0>3d}.png'.format(epoch)))
        if logger:
            logger.add_scalar('fix_err_ratio', bitwise_avg_err_fix, epoch)
            logger.add_scalar('random_err_ratio', bitwise_avg_err_random, epoch)
            logger.add_image('image_rand', stack_image_random, epoch)
            logger.add_image('image_fix', stack_image_fix, epoch)
        # Checkpoint every component each epoch.
        torch.save(encoder.state_dict(), '{}/models/encoder-epoch{:0>3d}.pth'.format(dir_save, epoch))
        torch.save(decoder.state_dict(), '{}/models/decoder-epoch{:0>3d}.pth'.format(dir_save, epoch))
        if args.use_noise:
            torch.save(noiser.state_dict(), '{}/models/noiser-epoch{:0>3d}.pth'.format(dir_save, epoch))
        torch.save(discriminator.state_dict(), '{}/models/discriminator-epoch{:0>3d}.pth'.format(dir_save, epoch))
Exemplo n.º 60
0
def initialize_logger(config_file):
    """Construct a Logger and load its configuration from *config_file*."""
    logger_instance = Logger()
    logger_instance.load_configs(config_file)