Code Example #1
 def __init__(self):
     super(DQNUnit, self).__init__()
     if config().sim.env.state.type == "simple":
         self.simple_fc = nn.Sequential(nn.Linear(8, 8), nn.ReLU(),
                                        nn.Linear(8, 8), nn.ReLU(),
                                        nn.Linear(8, 4))
     else:
         board_size = config().sim.env.size
         n_actions = 4
         conv_layers = [
             nn.Conv2d(1, 16, 3, stride=2, padding=1),
             nn.ReLU(),
             nn.Conv2d(16, 32, 3, stride=2, padding=1),
             nn.ReLU(),
             nn.Conv2d(32, 32, 3, stride=2, padding=1),
             nn.ReLU(),
             nn.Conv2d(32, 32, 3, stride=2, padding=1),
             nn.ReLU(),
         ]
         out_dim = (board_size, board_size)
         for layer in conv_layers:
             if type(layer) != nn.ReLU:
                 out_dim = output_size_conv2d_layer(out_dim[0], out_dim[1],
                                                    layer)
         self.conv = nn.Sequential(*conv_layers)
         self.fc = nn.Sequential(
             nn.Linear(out_dim, 32),
             nn.ReLU(),
             nn.Linear(32, n_actions),
         )
Code Example #2
def evaluate_cnn(dataset, model, criterion, get_semantic_label):
    """
    Runs inference on an autoencoder model to evaluate the mse loss on the
    validation sets. Reports per-class performance to terminal.
    """

    num_classes = config('autoencoder.num_classes')
    batch_size = config('autoencoder.batch_size')
    performance = np.zeros(num_classes)
    for c in range(num_classes):
        with torch.no_grad():
            X = dataset[c]
            output = model(X)
            predicted = predictions(output.data)
            total = len(X)
            correct = (predicted == c).sum().item()
            val_acc = correct / total

        performance[c] = val_acc

    for c, p in enumerate(performance):
        print('Class {}: {} accuracy'.format(get_semantic_label(c), p))
Code Example #3
    def __init__(self, partition, task="target", augment=False):
        """Read in the necessary data from disk.

        For parts 2, 3 and data augmentation, `task` should be "target".
        For source task of part 4, `task` should be "source".

        For data augmentation, `augment` should be True.
        """
        super().__init__()

        if partition not in ["train", "val", "test", "challenge"]:
            raise ValueError("Partition {} does not exist".format(partition))

        np.random.seed(42)
        torch.manual_seed(42)
        random.seed(42)
        self.partition = partition
        self.task = task
        self.augment = augment
        # Load in all the data we need from disk
        if task == "target" or task == "source":
            self.metadata = pd.read_csv(config("csv_file"))
        if self.augment:
            print("Augmented")
            self.metadata = pd.read_csv(config("augmented_csv_file"))
        self.X, self.y = self._load_data()

        self.semantic_labels = dict(
            zip(
                self.metadata[self.metadata.task ==
                              self.task]["numeric_label"],
                self.metadata[self.metadata.task == self.task]
                ["semantic_label"],
            ))
Code Example #4
def evaluate_autoencoder(dataset, get_semantic_label, model, criterion):
    """
    Runs inference on an autoencoder model to evaluate the mse loss on the
    validation sets. Reports per-class performance to terminal.
    """
    num_classes = config('autoencoder.num_classes')
    batch_size = config('autoencoder.batch_size')
    performance = np.zeros(num_classes)
    overall_performance = 0
    overall_sample_num = 0
    for c in range(num_classes):
        X = dataset[c]
        _, recon = model(X)
        batch_mse = criterion(recon, X).item()
        performance[c] = batch_mse
        overall_sample_num = overall_sample_num + X.shape[0]
        overall_performance = overall_performance + batch_mse * X.shape[0]
    overall_performance = overall_performance / overall_sample_num
    print('Overall performance: {} mean squared error'.format(
        overall_performance))
    for c, p in enumerate(performance):
        print('Class {}: {} mean squared error'.format(get_semantic_label(c),
                                                       p))
Code Example #5
    def default(self, *args, **kargs):
        if len(args) > 1:
            return self.parent.error_page("too many arguments to Keywords")
        elif len(args) < 1:
            return self.parent.error_page("Too few arguments to Keywords")

        try:
            offset = int(kargs.get('offset', 0))
        except ValueError:
            offset = 0

        self.keyword = unquote_plus(args[0])
        print "Key =  %s" % self.keyword

        #remember we're not sure if base_url has a trailing '/' or not...
        if 'Atom' in config("plugins"):
            self.atom_link = config('base_url').rstrip('/') + \
                '/Atom/keywords/' + self.keyword

        entries = get_entries_by_meta('keywords')
        entries = [
            e for e in entries
            if self.keyword in keysplit(e.metadata['keywords'])
        ]
        entries = entries[offset:offset + config("num_entries")]
        return self.parent.render_page(entries, self.keyword, offset)
Code Example #6
def main(device=torch.device('cuda:0')):
    # CLI arguments
    parser = arg.ArgumentParser(
        description='We all know what we are doing. Fighting!')
    parser.add_argument("--datasize",
                        "-d",
                        default="small",
                        type=str,
                        help="data size you want to use, small, medium, total")
    # Parsing
    args = parser.parse_args()
    # Data loaders
    datasize = args.datasize
    pathname = "data/nyu.zip"
    tr_loader, va_loader, te_loader = getTrainingValidationTestingData(
        datasize, pathname, batch_size=config("unet.batch_size"))

    # Model
    model = Net()

    # define loss function
    # criterion = torch.nn.L1Loss()

    # Attempts to restore the latest checkpoint if exists
    print("Loading unet...")
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config("unet.checkpoint"))
    acc, loss = utils.evaluate_model(model, te_loader, device)
    # axes = util.make_training_plot()
    print(f'Test Accuracy:{acc}')
    print(f'Test Loss:{loss}')
Code Example #7
    def __init__(self, partition, task="target", augment=False):
        '''
            Read in the data from the disk
        '''

        super().__init__()

        if partition not in ["train", "val", "test"]:
            raise ValueError("Partition {} does not exist".format(partition))

        FILEPATH = config("csv_file")
        self.PATH = config("image_path")
        seed = 0
        np.random.seed(seed)  # set the seed for random
        torch.manual_seed(seed)
        random.seed(seed)
        self.task = task
        self.partition = partition
        self.metadata = pd.read_csv(
            FILEPATH, converters={'numeric_label': from_np_array})
        self.augment = augment

        if not self.augment:
            self.metadata = pd.read_csv(FILEPATH)
            print('loading data from csv file')

        self.X, self.y = self._load_data()
        self.semantic_labels = dict(
            zip(
                self.metadata["numeric_label"],
                self.metadata["semantic_label"],
            ))
Code Example #8
def main():
    """Print performance metrics for model at specified epoch."""
    # Data loaders
    tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
        task="target",
        batch_size=config("cnn.batch_size"),
    )

    # Model
    model = Target()

    # define loss function
    criterion = torch.nn.CrossEntropyLoss()

    # Attempts to restore the latest checkpoint if exists
    print("Loading cnn...")
    model, start_epoch, stats = restore_checkpoint(model,
                                                   config("cnn.checkpoint"))

    axes = utils.make_training_plot()

    # Evaluate the model
    evaluate_epoch(
        axes,
        tr_loader,
        va_loader,
        te_loader,
        model,
        criterion,
        start_epoch,
        stats,
        include_test=True,
        update_plot=False,
    )
Code Example #9
    def wrapper_load_config(string, entities):
        payload = dict()
        payload["string"] = string
        payload["entities"] = entities

        api_key = utils.config("api_key")
        pro = utils.config("pro")
        payload["temperature_units"] = utils.config("temperature_units")
        payload["wind_speed_units"] = utils.config("wind_speed_units")

        if ((payload["temperature_units"] != "celsius")
                and (payload["temperature_units"] != "fahrenheit")):
            return utils.output("end", "invalid_temperature_units",
                                utils.translate("invalid_temperature_units"))

        if payload["wind_speed_units"] == "meters per seconds":
            payload["wind_speed_units_response"] = payload["wind_speed_units"]
            payload["wind_speed_units"] = "meters_sec"
        elif payload["wind_speed_units"] == "miles per hour":
            payload["wind_speed_units_response"] = payload["wind_speed_units"]
            payload["wind_speed_units"] = "miles_hour"
        else:
            return utils.output("end", "invalid_wind_speed_units",
                                utils.translate("invalid_wind_speed_units"))

        if pro:
            payload["owm"] = OWM(api_key, subscription_type="pro")
        else:
            payload["owm"] = OWM(api_key)

        return func(payload)
Code Example #10
def main():
    """Train transfer learning model and display training plots.

    Train four different models with {0, 1, 2, 3} layers frozen.
    """
    # data loaders
    tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
        task="target",
        batch_size=config("target.batch_size"),
    )

    freeze_none = Target()
    print("Loading source...")
    freeze_none, _, _ = restore_checkpoint(
        freeze_none, config("source.checkpoint"), force=True, pretrain=True
    )

    freeze_one = copy.deepcopy(freeze_none)
    freeze_two = copy.deepcopy(freeze_none)
    freeze_three = copy.deepcopy(freeze_none)

    freeze_layers(freeze_one, 1)
    freeze_layers(freeze_two, 2)
    freeze_layers(freeze_three, 3)

    train(tr_loader, va_loader, te_loader, freeze_none, "./checkpoints/target0/", 0)
    train(tr_loader, va_loader, te_loader, freeze_one, "./checkpoints/target1/", 1)
    train(tr_loader, va_loader, te_loader, freeze_two, "./checkpoints/target2/", 2)
    train(tr_loader, va_loader, te_loader, freeze_three, "./checkpoints/target3/", 3)
Code Example #11
def main(device=torch.device('cuda:0')):
    # CLI arguments
    parser = arg.ArgumentParser(
        description='We all know what we are doing. Fighting!')
    parser.add_argument("--datasize",
                        "-d",
                        default="small",
                        type=str,
                        help="data size you want to use, small, medium, total")
    # Parsing
    args = parser.parse_args()
    # Data loaders
    datasize = args.datasize
    pathname = "data/nyu.zip"
    tr_loader, va_loader, te_loader = getTrainingValidationTestingData(
        datasize, pathname, batch_size=config("unet.batch_size"))

    # Model
    #model = Net()
    #model = Dense121()
    model = Dense169()
    model = model.to(device)

    # define loss function
    # criterion = torch.nn.L1Loss()

    # Attempts to restore the latest checkpoint if exists
    print("Loading unet...")
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config("unet.checkpoint"))
    acc, loss = utils.evaluate_model(model, te_loader, device)
    # axes = util.make_training_plot()
    print(f'Test Error:{acc}')
    print(f'Test Loss:{loss}')

    # Get Test Images
    img_list = glob("examples/" + "*.png")

    # Set model to eval mode
    model.eval()
    model = model.to(device)

    # Begin testing loop
    print("Begin Test Loop ...")

    for idx, img_name in enumerate(img_list):

        img = load_images([img_name])
        img = torch.Tensor(img).float().to(device)
        print("Processing {}, Tensor Shape: {}".format(img_name, img.shape))

        with torch.no_grad():
            preds = model(img).squeeze(0)

        output = colorize(preds.data)
        output = output.transpose((1, 2, 0))
        cv2.imwrite(img_name.split(".")[0] + "_result.png", output)

        print("Processing {} done.".format(img_name))
Code Example #12
class Admin(object):
    _cp_config = {
        "tools.basic_auth.on": True,
        "tools.basic_auth.realm": "localhost",
        "tools.basic_auth.users": {
            config("admin_user"): md5.new(config("admin_pw")).hexdigest()
        },
        "tools.expires.on": True
    }

    def __init__(self, parent):
        self.parent = parent

    @cpy.expose
    def index(self):
        #TODO: what's the right redirect to give here?
        raise cpy.InternalRedirect("/Admin/ls")

    def navbar(self, ns):
        """Run a callback so that any module can add an element to the Admin
        navbar"""
        ### this callback gives an admin module the ability to add a link to
        ### the navigation bar of the Admin section. It should return a list of
        ### (string, string) tuples where the first string is the relative link
        ### to the function, and the second is the name to display on the menu
        ns['modules'] = run_callback(
            self.parent.plugins,
            "cb_admin_navbar",
        )
        return ('admin_head', ns)

    def cb_admin_navbar(self):
        return [('ls', 'Edit Stories'), ('add', 'Add Story')]

    @cpy.expose
    def add(self):
        #XXX: Should I move story editing to a plugin?
        ns = {'title': "Adding New Entry"}
        return [self.navbar(ns), ('admin_storyadd', ns)]

    @cpy.expose
    def add_story(self, story_title="", story_body="", filename=""):
        if story_title == "" or story_body == "" or filename == "":
            raise cpy.InternalRedirect("ls")

        filename = os.path.join(config('datadir'), filename)
        if not filename.endswith('.txt'): filename += '.txt'

        if os.path.isfile(filename):
            ns = {'title': 'File Already Exists', 'filename': filename}
            return [self.navbar(ns), ('admin_story_already_exists', ns)]

        try:
            f = open(filename, 'w')
            f.write(story_title + "\n")
            f.write(htmlunescape(story_body))
        except Exception, e:
            cpy.log("unable to log: " + e.Message)
        finally:
Code Example #13
 def connect(cls):
     if cls._client is None:
         cls._client = redis.Redis(host=config('REDIS_HOST'),
                                   db=config('REDIS_DB'),
                                   port=config('REDIS_PORT'),
                                   password=config('REDIS_PASSWORD'),
                                   socket_timeout=0.2,
                                   socket_connect_timeout=0.2)
Code Example #14
 def __init__(self, Exchangeabi, ERC827abi, trade_token_add, rpc_url):
     self.trade_token_add = trade_token_add
     self.web3Instance = Web3(
         Web3.HTTPProvider(rpc_url, request_kwargs={'timeout': 240}))
     self.erc827 = self.web3Instance.eth.contract(
         abi=ERC827abi, address=config('ETH_TOKEN_ADDRESS'))
     self.exchange = self.web3Instance.eth.contract(
         abi=Exchangeabi, address=config('EXCHANGE_ADDRESS'))
Code Example #15
 def get_exchange(cls):
     clc_address, rpc_url = config("CLC_ADDRESS"), config("RPC_URL")
     exchange, ERC827abi = config("Exchange_FILE"), config("ERC827abi_FILE")
     with open(os.path.join(BASE_FILE_DIR, exchange), "r") as f:
         Exchangeabi = json.load(f)
     with open(os.path.join(BASE_FILE_DIR, ERC827abi), "r") as f:
         ERC827abi = json.load(f)
     return cls(Exchangeabi, ERC827abi, clc_address, rpc_url)
Code Example #16
def main():
    # Data loaders
    tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
        num_classes=config('cnn.num_classes'))

    # Model
    model = CNN()

    # TODO: define loss function, and optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-4)
    #

    print('Number of float-valued parameters:', count_parameters(model))

    # Attempts to restore the latest checkpoint if exists
    print('Loading cnn...')
    model, start_epoch, stats = restore_checkpoint(model,
                                                   config('cnn.checkpoint'))

    axes = utils.make_cnn_training_plot()

    # Evaluate the randomly initialized model
    _evaluate_epoch(axes, tr_loader, va_loader, model, criterion, start_epoch,
                    stats)

    # Loop over the entire dataset multiple times
    for epoch in range(start_epoch, config('cnn.num_epochs')):
        # Train model
        _train_epoch(tr_loader, model, criterion, optimizer)

        # Evaluate model
        _evaluate_epoch(axes, tr_loader, va_loader, model, criterion,
                        epoch + 1, stats)

        # Save model parameters
        save_checkpoint(model, epoch + 1, config('cnn.checkpoint'), stats)

    print('Finished Training')

    y_true, y_pred = [], []
    correct, total = 0, 0
    running_loss = []
    for X, y in va_loader:
        with torch.no_grad():
            output = model(X)
            predicted = predictions(output.data)
            y_true.extend(y)
            y_pred.extend(predicted)
            total += y.size(0)
            correct += (predicted == y).sum().item()
            running_loss.append(criterion(output, y).item())
    print("Validation data accuracies:")
    print(confusion_matrix(y_true, y_pred))

    # Save figure and keep plot open
    utils.save_cnn_training_plot()
    utils.hold_training_plot()
Code Example #17
def main(device=torch.device('cuda:0')):
    # CLI arguments
    parser = arg.ArgumentParser(
        description='We all know what we are doing. Fighting!')
    parser.add_argument("--datasize",
                        "-d",
                        default="small",
                        type=str,
                        help="data size you want to use, small, medium, total")
    # Parsing
    args = parser.parse_args()
    # Data loaders

    # TODO:
    ####### Enter the model selection here! #####
    modelSelection = input(
        'Please input the type of model to be used '
        '(res50, dense121, dense169, mob_v2, mob, squeeze): '
    )

    datasize = args.datasize
    filename = "nyu_new.zip"
    pathname = f"data/{filename}"
    csv = "data/nyu_csv.zip"
    te_loader = getTestingData(datasize,
                               csv,
                               pathname,
                               batch_size=config(modelSelection +
                                                 ".batch_size"))

    # Model
    if modelSelection.lower() == 'res50':
        model = Res50()
    elif modelSelection.lower() == 'dense121':
        model = Dense121()
    elif modelSelection.lower() == 'mob_v2':
        model = Mob_v2()
    elif modelSelection.lower() == 'dense169':
        model = Dense169()
    elif modelSelection.lower() == 'mob':
        model = Net()
    elif modelSelection.lower() == 'squeeze':
        model = Squeeze()
    else:
        assert False, 'Wrong type of model selection string!'
    model = model.to(device)

    # define loss function
    # criterion = torch.nn.L1Loss()

    # Attempts to restore the latest checkpoint if exists
    print(f"Loading {mdoelSelection}...")
    model, start_epoch, stats = utils.restore_checkpoint(
        model, utils.config(modelSelection + ".checkpoint"))
    acc, loss = utils.evaluate_model(model, te_loader, device, test=True)
    # axes = util.make_training_plot()
    print(f'Test Error:{acc}')
    print(f'Test Loss:{loss}')
Code Example #18
 def get_order_book(limit=1):
     terra_handler = TerraChain.get_exchange()
     asks = terra_handler.exchange.functions.getAsks(
         limit, terra_handler.trade_token_add,
         config('ETH_TOKEN_ADDRESS')).call()
     bids = terra_handler.exchange.functions.getBids(
         limit, terra_handler.trade_token_add,
         config('ETH_TOKEN_ADDRESS')).call()
     return bids[2][0] / 10**18, asks[2][0] / 10**18
Code Example #19
    def forward(self, x):
        if config().sim.env.state.type == "simple":
            x = x.reshape(x.size(0), x.size(2))
            return F.gumbel_softmax(self.simple_fc(x),
                                    tau=config().learning.gumbel_softmax.tau)

        out = self.conv(x)
        out = out.view(x.size(0), -1)
        return F.gumbel_softmax(self.fc(out),
                                tau=config().learning.gumbel_softmax.tau)
Code Example #20
def main():
    # Data loaders
    tr_loader, va_loader, te_loader, get_semantic_labels = get_train_val_test_loaders(
        num_classes=config('cnn.num_classes'))

    # Model
    model = CNN()

    # TODO: define loss function, and optimizer
    params = list(model.conv1.parameters()) + list(
        model.conv2.parameters()) + list(model.conv3.parameters())
    params = params + list(model.fc1.parameters()) + list(
        model.fc2.parameters()) + list(model.fc3.parameters())
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(params, lr=0.0001)
    #

    print('Number of float-valued parameters:', count_parameters(model))

    # Attempts to restore the latest checkpoint if exists
    print('Loading cnn...')
    model, start_epoch, stats = restore_checkpoint(model,
                                                   config('cnn.checkpoint'))

    fig, axes = utils.make_cnn_training_plot()

    # Evaluate the randomly initialized model
    _evaluate_epoch(axes, tr_loader, va_loader, model, criterion, start_epoch,
                    stats)

    # Loop over the entire dataset multiple times
    for epoch in range(start_epoch, config('cnn.num_epochs')):
        # Train model
        _train_epoch(tr_loader, model, criterion, optimizer)

        # Evaluate model
        _evaluate_epoch(axes, tr_loader, va_loader, model, criterion,
                        epoch + 1, stats)

        # Save model parameters
        save_checkpoint(model, epoch + 1, config('cnn.checkpoint'), stats)

    print('Finished Training')

    model, _, _ = restore_checkpoint(model, config('cnn.checkpoint'))

    dataset = get_data_by_label(va_loader)
    evaluate_cnn(dataset, model, criterion, get_semantic_labels)

    # Save figure and keep plot open
    utils.save_cnn_training_plot(fig)
    utils.hold_training_plot()
Code Example #21
 def get_losses(self, state_batch, next_state_batch, action_batch,
                reward_batch):
     if config().learning.gumbel_softmax.use:
         action_batch = F.gumbel_softmax(
             action_batch, tau=config().learning.gumbel_softmax.tau)
     predicted_next_actions = self.actor_target(next_state_batch)
     y = reward_batch + self.gamma * self.critic_target(
         next_state_batch, predicted_next_actions)
     loss_critic = self.critic_criterion(
         y, self.critic(state_batch, action_batch))
     actor_loss = -self.critic(state_batch, self.actor(state_batch))
     actor_loss = actor_loss.mean()
     return loss_critic, actor_loss
Code Example #22
 def __init__(self):
     self.board = None
     self.scale = 16
     self.episode = 0
     self.iter = 0
     self.size = config().sim.env.size
     self.agent_position = None
     self.coin_positions = None
     self.board_memory = []
     self.state_memory = []
     self.mask_board = None
     self.seen_positions = []
     self.max_length = config().sim.env.max_length
Code Example #23
def login(string):
    config = {
        'redirect_uri': utils.config('callback_uri'),
        'response_type': 'code',
        'client_id': utils.config('client_id'),
        'scope': utils.config('scope')
    }

    url = 'https://accounts.spotify.com/authorize?' + urlencode(config)

    webbrowser.open(url)

    utils.output('end', 'success', utils.translate('login'))
Code Example #24
def visualize_layer1_activations(i):
    xi, yi = tr_loader.dataset[i]
    xi = xi.view((1,3,config('image_dim'),config('image_dim')))
    zi = F.relu(model.conv1(xi))
    zi = zi.detach().numpy()[0]
    sort_mask = np.argsort(model.conv1.weight.detach().numpy().mean(axis=(1,2,3)))
    zi = zi[sort_mask]
    fig, axes = plt.subplots(4, 4, figsize=(10,10))
    for j, ax in enumerate(axes.ravel()):
        ax.axis('off')
        im = ax.imshow(zi[j], cmap='gray')
    fig.suptitle('Layer 1 activations, y={}'.format(yi))
    fig.savefig('CNN_viz1_{}.png'.format(yi), dpi=200, bbox_inches='tight')
Code Example #25
File: Entry.py  Project: Tubbz-alt/cherry-blossom
    def __init__(self, filename, datadir):
        self.filename = filename
        self.relpath, self.ext = os.path.splitext(filename.split(datadir)[-1])
        self.relpath = self.relpath.strip('\/')
        self.datefmt = config('date_fmt', '%b %d, %Y')
        self._text = u''  #will store the text of the file
        self._encoding = config('blog_encoding', 'utf-8')

        #when this is set to 1, the entry will not reload from its source file
        #this can be used for a cache mechanism, but is currently unused
        self.reload_flag = 0

        self.metadata = {}
        self.parse_meta()
Code Example #26
def main():
    # data loaders
    tr_loader, va_loader, te_loader, _ = get_train_val_test_loaders(
        num_classes=config('autoencoder.classifier.num_classes'))

    ae_classifier = AutoencoderClassifier(config('autoencoder.ae_repr_dim'),
                                          config('autoencoder.classifier.num_classes'))
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(ae_classifier.parameters(),
                                 lr=config('autoencoder.classifier.learning_rate'))

    # freeze the weights of the encoder
    for name, param in ae_classifier.named_parameters():
        if 'fc1.' in name or 'fc2.' in name:
            param.requires_grad = False

    # Attempts to restore the latest checkpoint if exists
    print('Loading autoencoder...')
    ae_classifier, _, _ = restore_checkpoint(ae_classifier,
                                             config('autoencoder.checkpoint'), force=True, pretrain=True)
    print('Loading autoencoder classifier...')
    ae_classifier, start_epoch, stats = restore_checkpoint(ae_classifier,
                                                           config('autoencoder.classifier.checkpoint'))

    axes = utils.make_cnn_training_plot(name='Autoencoder Classifier')

    # Evaluate the randomly initialized model
    _evaluate_epoch(axes, tr_loader, va_loader, ae_classifier, criterion,
                    start_epoch, stats)

    # Loop over the entire dataset multiple times
    for epoch in range(start_epoch, config('autoencoder.classifier.num_epochs')):
        # Train model
        _train_epoch(tr_loader, ae_classifier, criterion, optimizer)

        # Evaluate model
        _evaluate_epoch(axes, tr_loader, va_loader, ae_classifier, criterion,
                        epoch + 1, stats)

        # Save model parameters
        save_checkpoint(ae_classifier, epoch + 1,
                        config('autoencoder.classifier.checkpoint'), stats)

    print('Finished Training')
    with torch.no_grad():
        y_true, y_pred = [], []
        for X, y in va_loader:
            output = ae_classifier(X)
            predicted = predictions(output.data)
            y_true.extend(y)
            y_pred.extend(predicted)
        print("Validation data accuracies:")
        print(confusion_matrix(y_true, y_pred))


    # Keep plot open
    utils.save_cnn_training_plot(name='ae_clf')
    utils.hold_training_plot()
Code Example #27
 def _get_state_progressive(self, board, blank):
     x, y = self.agent_position
     state = np.zeros_like(board)
     if blank:  # Useful in the beginning when there are no frames in the memory
         return state
     state[x, y] = board[x, y]
     depth_of_field = config().sim.env.state.depth_of_field
     positions = [self.agent_position]
     recursive_walk(positions, state, board, depth_of_field,
                    self.mask_board, self.seen_positions)
     if config().sim.env.state.type == "memory":
         # we add previously seen positions
         state[self.mask_board] = board[self.mask_board]
     return state
Code Example #28
def main():
    """Create confusion matrix and save to file."""
    tr_loader, va_loader, te_loader, semantic_labels = get_train_val_test_loaders(
        task="source", batch_size=config("source.batch_size"))

    model = Source()
    print("Loading source...")
    model, epoch, stats = restore_checkpoint(model,
                                             config("source.checkpoint"))

    sem_labels = "0 - Samoyed\n1 - Miniature Poodle\n2 - Saint Bernard\n3 - Great Dane\n4 - Dalmatian\n5 - Chihuahua\n6 - Siberian Husky\n7 - Yorkshire Terrier"

    # Evaluate model
    plot_conf(va_loader, model, sem_labels, "conf_matrix.png")
Code Example #29
def authorize(url):
    db = utils.db()['db']

    code = url.split("?code=")[1].split("&")[0]
    payload = {'redirect_uri': utils.config('callback_uri'),
               'code': code,
               'grant_type': 'authorization_code',
               'scope': utils.config('scope')}
    
    auth_header = base64.b64encode(six.text_type(utils.config('client_id') + ':' + utils.config('client_secret')).encode('ascii'))
    headers = {'Authorization': 'Basic %s' % auth_header.decode('ascii')}

    results = requests.post('https://accounts.spotify.com/api/token', data=payload, headers=headers)

    token_info = results.json()
    token_info['expires_at'] = int(time.time()) + token_info['expires_in']
    token_info['client_id'] = utils.config('client_id')
    token_info['client_secret'] = utils.config('client_secret')
    token_info['prefix'] = utils.config('prefix')
    token_info['scope'] = utils.config('scope')

    db.insert(token_info)

    utils.output('end', 'success', utils.translate('logged'))
Code Example #30
def report_validation_performance(dataset, get_semantic_label, model, criterion):
    cols = ['Orig', 'Autoencoder recon', 'Orig', 'Autoencoder recon', 'Orig', 'Autoencoder recon']
    rows = []
    fig, axes = plt.subplots(nrows=config('autoencoder.num_classes'),
        ncols=6, figsize=(16,16))
    for i in range(config('autoencoder.num_classes')):
        X = dataset[i]
        rows.append(get_semantic_label(i))
        _, recon = model(X)
        error = criterion(recon, X).item()
        print('label {}, test error is {}'.format(i, error))
        losses = ((recon - X) ** 2).mean(3).mean(2).mean(1).data.numpy()
        best, worst = np.argmin(losses), np.argmax(losses)
        typical = np.argsort(losses)[len(losses)//2]
        print('  best case:', losses[best])
        print(' worst case:', losses[worst])
        print('    typical:', losses[typical])
        axes[i,0].imshow(utils.denormalize_image(
            np.transpose(X[best].numpy(), (1, 2, 0))),
            plt.get_cmap('gray'), interpolation='bicubic', clim=(-1.0, +1.0))
        axes[i,1].imshow(utils.denormalize_image(
            np.transpose(recon[best].data.numpy(), (1, 2, 0))),
            plt.get_cmap('gray'), interpolation='bicubic', clim=(-1.0, +1.0))
        axes[i,2].imshow(utils.denormalize_image(
            np.transpose(X[worst].numpy(), (1, 2, 0))),
            plt.get_cmap('gray'), interpolation='bicubic', clim=(-1.0, +1.0))
        axes[i,3].imshow(utils.denormalize_image(
            np.transpose(recon[worst].data.numpy(), (1, 2, 0))),
            plt.get_cmap('gray'), interpolation='bicubic', clim=(-1.0, +1.0))
        axes[i,4].imshow(utils.denormalize_image(
            np.transpose(X[typical].numpy(), (1, 2, 0))),
            plt.get_cmap('gray'), interpolation='bicubic', clim=(-1.0, +1.0))
        axes[i,5].imshow(utils.denormalize_image(
            np.transpose(recon[typical].data.numpy(), (1, 2, 0))),
            plt.get_cmap('gray'), interpolation='bicubic', clim=(-1.0, +1.0))

    for ax, row in zip(axes[:,0], rows):
        ax.annotate(row, xy=(0, 0.5), xytext=(110, 0),
            xycoords=ax.yaxis.label, textcoords='offset points',
            size='large', ha='right', va='center')

    for ax, col in zip(axes[0], cols):
        ax.set_title(col)

    for ax in axes.ravel():
        ax.axis('off')

    plt.suptitle('Autoencoder reconstruction\n Best, Worst, Typical', size=20)
    plt.savefig("ae_per_class_perf.png", dpi=200, bbox_inches='tight')
Code Example #31
File: basehandler.py  Project: chdb/DhammaMap
def sendEmailNow (**ka):  
    ok = u.sendEmail(**ka)        
    if ok and u.config('recordEmails'):
        try:
            m.SentEmail.create (**ka)
        except: # (apiproxy_errors.OverQuotaError, BadValueError):
            logging.exception("Error saving SentEmail in datastore")
Code Example #32
File: Atom.py  Project: llimllib/cherry-blossom
    def prepare_atom_template(self, entries):
        ns = cpy.config.get('/').copy()
        entry_structs = []
        last_updated = ''
        for e in entries:
            es = EntryStruct()       
            es.title = e.title

            #this callback gives any interested plugins the chance to change
            #the text of a story, as presented in a feed. It gives an Entry
            #object, and ignores any return value
            run_callback(self.parent.plugins, "cb_feed_story", e)
            fulltext = escape(e.text)

            #If you only want short descriptions:
            #es.desc = escape(e.text[:255])
            #for full text descriptions:
            es.desc = fulltext
            es.text = fulltext
            es.time = time.strftime('%Y-%m-%dT%H:%M:%SZ', e.time_tuple)
            if not last_updated: last_updated = es.time
            es.link = urljoin(config('base_url'), e.relpath + '.html')
            entry_structs.append(es)
        ns['last_updated'] = last_updated
        ns['entries'] = entry_structs
        return ('atom', ns)
Code Example #33
def replace_eluts(arr, scores, name='eluts'):
    species = set(ut.config()['elut_species'].split('_'))
    keep_cols = [n for n in arr.dtype.names[3:] if n[:2] not in species]
    keep_cols = keep_cols + [name]
    newarr = ppi.base_array(arr[['id1','id2','hit']], keep_cols, len(arr))
    newarr[name] = scores
    return newarr
Code Example #34
File: blox.py  Project: llimllib/cherry-blossom
    def init_plugins(self, pluginlist):
        """
        Initialize plugins. Assumes that each plugin contains a class of the
        same name as the file. If it does, this function attaches an instance 
        of that class to self, and adds that instance to the plugins array.
        """
        plugindir = config('plugin_dir', None)
        if not plugindir or not os.path.isdir(plugindir):
            cpy.log("Invalid Plugin Directory, no plugins loaded")
            return
        else:
            sys.path.append(plugindir)

        #XXX: Should we just scan for *.py files in the plugin dir? Should the
        # mechanism to remove a plugin be renaming it or taking it out of 
        # conf?
        for p in pluginlist:
            try:
                mod = __import__(p)
                if not hasattr(self, p):
                    instance = getattr(mod, p)(self)
                    setattr(self, p, instance)
                    self.plugins.append(instance)
                    cpy.log("successfully imported plugin module %s" % p)
                else:
                    raise ImportError
            #bare except, because the modules could raise any number of errors
            #on import, and we want them not to kill our server
            except:
                cpy.log("import failed on module %s, module not loaded" % p)
                cpy.log("%s" % sys.exc_info()[0])
                cpy.log("%s" % traceback.format_exc())
Code Example #35
File: i18n.py  Project: chdb/DhammaMap
def getLocaleStrings (handler):
    ctag = set_locale (handler)  # current locale as a string in form: 'aa' or 'aa_AA'  eg: 'en' or 'fr_CA'
    ls = memcache.get (ctag)     # requests from a user will generally have same locale so it makes sense to hold this in memcache @UndefinedVariable
    if ls is None:               # ... and even more so because also many different users will use same locale (memcache is global to the app)
        locale_tags = u.config ('locales')
        ls = LocaleStrings (ctag, locale_tags)
        memcache.add (ctag, ls)  # @UndefinedVariable
    return ls
Code Example #36
File: Admin.py  Project: llimllib/cherry-blossom
    def ls(self, dir=""):
        dirname = os.path.join(config('datadir'), dir)
        l = [f for f in os.listdir(dirname) if f.endswith('.txt')]
        title = "Listing dir %s" % dir

        ns = locals()
        del ns['self']

        return [self.navbar(ns), ('admin_ls', ns)]
Code Example #37
File: blox.py  Project: llimllib/cherry-blossom
    def __init__(self):
        self.timeformats =  [["%Y", "%d", "%m", "%b", "%B"],
            ["%Y %b", "%Y %m", "%Y %b", "%Y %B", "%m %d", "%b %d", "%B %d"],
            ["%Y %m %d", "%Y %b %d", "%Y %B %d"]]
        self.plugins = [] #contains all loaded plugins

        #set the output encoding
        self._cp_config["cpy.tools.encode.encoding"] = "utf-8"

        self.now = datetime.datetime.now
        self.last_update = self.now()
        self.num_entries = config('num_entries')
        self.datadir = config('datadir')
        self.ignore_directories = config('ignore_directories')
        self.fp = '' #a cache of the front page content
        self.index() #thus, we don't have to parse the metadata of the front 
                     #page article when the second request comes in
        self.init_plugins(config('plugins'))
        FileCabinet.get_most_recent(self.datadir) #initialize entries
Code Example #38
File: daemon.py  Project: RBron/sun
 def __init__(self):
     pynotify.uninit()
     pynotify.init("sun")
     self.pkg_count = fetch()[0]
     self.message_added = ""
     self.summary = "{0}Software Updates".format(" " * 14)
     self.message = ("{0}{1} Software updates are available\n".format(
         " " * 3, self.pkg_count))
     self.icon = "{0}{1}.png".format(icon_path, __all__)
     self.n = pynotify.Notification(self.summary, self.message, self.icon)
     self.n.set_timeout(60000 * int(config()["STANDBY"]))
Code Example #39
File: Keywords.py  Project: llimllib/cherry-blossom
 def cb_story(self, entry):
     """Add a $keywords variable to an entry which is a linkified,
         comma-seperated string"""
     kwstring = ''
     base_url = config('base_url')
     base_url = base_url.rstrip('/')
     #list comp for 2.3 compatibility
     kws = [k.strip() for k in keysplit(entry.metadata.get('keywords', "")) if k != '']
     links = ['<a href=%s/Keywords/%s>%s</a>' % (base_url, quote_plus(kw), kw) 
                 for kw in kws]
     #add a comma seperated list of keywords to the entry
     entry.keywords = ', '.join(links)
Code Example #40
File: main.py  Project: fatman2021/sun
def main():

    while True:
        connection = True
        time.sleep(1)
        try:
            urllib2.urlopen(mirror())
        except urllib2.URLError:
            connection = False
        if connection:
            Notify().show()
            time.sleep(60 * int(config()["INTERVAL"]))
Code Example #41
File: Admin.py  Project: llimllib/cherry-blossom
    def update_story(self, story_title="", story_body="", filename=""):
        if story_title == "" or story_body == "" or filename == "":
            raise cpy.InternalRedirect("ls")

        filename = os.path.join(config('datadir'), filename)
        tmpfile = filename + ".bak"
        try:
            f = open(tmpfile, 'w')
            f.write(story_title + "\n")
            f.write(htmlunescape(story_body))
        except Exception, e:
            os.unlink(tmpfile)
            cpy.log("unable to log: " + e.Message)
Code Example #42
def pd_evidences(cluster,arr):
    arr_clust = arr[[i for i,r in enumerate(arr) 
            if r[0] in cluster and r[1] in cluster]]
    # doesn't seem right--if most the interactions are strong, shouldn't
    # normalize them down--should still count. but does it matter since it's
    # all comparative? messes with thinking about the clipping though in
    # score_together.
    #features, _ = ml.normalize(ml.arr_feats(arr_clust))
    sps = ut.config()['elut_species'].split('_')
    names = [n for n in arr.dtype.names[3:] if n[:2] in sps]
    features = arr[names]
    pd_ev = pd.PairDict([[r[0],r[1]] + list(features[i]) for i,r in
        enumerate(arr_clust)])
    return pd_ev, names
Code Example #43
    def highlight_code(self, textstr, font_tags=False):
        for lang, code in CODE_RE.findall(textstr):
            if not lang: lang = "python"

            try:
                lexer = get_lexer_by_name(lang.strip('"'))
            except ClassNotFound:
                return
            formatter = HtmlFormatter(style=config("syntax_style", "default", 
                "SyntaxHighlight"), noclasses=font_tags)
            code = pygments.highlight(code, lexer, formatter)

            textstr = CODE_RE.sub(code, textstr, 1)
        return textstr
Code Example #44
File: Keywords.py  Project: llimllib/cherry-blossom
    def default(self, *args, **kargs):
        if len(args) > 1:
            return self.parent.error_page("too many arguments to Keywords")
        elif len(args) < 1:
            return self.parent.error_page("Too few arguments to Keywords")

        try:
            offset = int(kargs.get('offset', 0))
        except ValueError:
            offset = 0
        
        self.keyword = unquote_plus(args[0])
        print "Key =  %s" % self.keyword
        
        #remember we're not sure if base_url has a trailing '/' or not...
        if 'Atom' in config("plugins"):
            self.atom_link = config('base_url').rstrip('/') + \
                '/Atom/keywords/' + self.keyword
        
        entries = get_entries_by_meta('keywords')
        entries = [e for e in entries if self.keyword in keysplit(e.metadata['keywords'])]
        entries = entries[offset:offset + config("num_entries")]
        return self.parent.render_page(entries, self.keyword, offset)
Code Example #45
File: basehandler.py  Project: chdb/DhammaMap
 def _initDelay (minWait):
     _s.delay = minWait # ds
     for key, diff, cf in _s.monitors.itervalues():
         nBad = _s._get (key, diff) [0]
         if nBad:
             #logging.debug('extra = %d for %d bad %s logins', cf.delayFn(nBad), nBad, cf.name)
             _s.delay += cf.delayFn(nBad)
     d = _s.delay*100.0                  # Convert from int-deciseconds to float-milliseconds 
     mcka = u.config('MemCacheKeepAlive')# Divide d into a series of equal waits so each wait is the max that is less than MemCacheKeepAlive
     n = -(-d//mcka) # number of waits. NB -(-a//b) rounds up and is equivalent to math.ceil (a/b)
     _s.wait = int(-(-d//n)) # .. round up to int-millisecs
     
     logging.debug('delay = %d ms, n = %d, wait = %d ms, total = %d', d, n, _s.wait, _s.wait*n)
     assert _s.wait <= mcka
     assert n     * _s.wait >= d
     assert (n-1) * _s.wait <= d
Code Example #46
File: basehandler.py  Project: chdb/DhammaMap
        def _initMonitors (ema, ipa, hlr):
        
            def _insert (name, key, diff):
                assert name in lCfg
                #diff is the distinct value 
                _s.monitors[name] = ('L:'+hlr+':'+key, diff, lCfg[name])

            cfg = u.config(hlr)
            lCfg = cfg.lockCfg
            _s.monitors = {}
                    # name    ,key  ,diff
            _insert ('ema_ipa',_s.ei,None)
            _insert ('ema'    ,ema  ,ipa )
            _insert ('ipa'    ,ipa  ,ema )       
            #logging.debug('monitors = %r',_s.monitors)
            return cfg
Code Example #47
File: orth.py  Project: marcottelab/infer_complexes
def convert_dict_single(fromtype, totype):
    """
    totype: must be Sp (eg 'Hs') or Sp_seqdb
    Returns None if not necessary or not found.
    """
    if len(totype.split('_')) > 1:
        # Get rid of the 2nd half of totype if it's default for that species
        tosp, toseqdb = totype.split('_')
        if toseqdb == ut.config()[tosp+'_default']:
            totype = tosp
    if fromtype == totype:
        return None
    elif len(fromtype) == len(totype) == 2:
        return odict(fromtype, totype)
    else:
        return custom_conversion(fromtype, totype)
Code Example #48
File: basehandler.py  Project: chdb/DhammaMap
 def _taskqueue(h, *pa, **ka):
     """ Check, if in Staging or Production, that h is being executed by Taskqueue 
         Otherwise, allow run in localhost calling the url
     """
     if h.request.headers.get('X-AppEngine-TaskName'):
         assert h.request.path.startswith('/tq')
     elif u.config('Env') == 'Prod': 
         if not users.is_current_user_admin():   # we cant use this test in devServer or if logged-in as admin 
             logging.warning('Someone hacking a task url? pushQueueMethod does not have taskname header')  
             return h.error(403) #Forbidden
     try:
         return taskhandler(h, *pa, **ka)
     except (TransientError, DeadlineExceededError):
         raise # keep trying! (Exceptions in Push Queue Tasks are caught by the system and retried with exp backoff.)
     except: 
         logging.exception("Task Failed:") #other exceptions - just give up!
Code Example #49
def merge_by_species(arr, matches, func, remove=False):
    """
    matches: like [apex] or [wcc, apex, ...]
    Makes patterns with match for each species, like 'Hs.*apex
    """
    assert not isinstance(matches,str), "matches is list, not string"
    def merge_recurse(arr, patterns, func):
        if patterns:
            newarr = merge_features(arr, patterns[0], func, remove)
            return merge_recurse(newarr, patterns[1:], func)
        else:
            return arr
    # Won't match a merged feature since that will have a * in it.
    patterns = [sp+'\w*'+match for match in matches for sp in
            ut.config()['elut_species'].split('_')]
    return merge_recurse(arr, patterns, func)
Code Example #50
File: fnet.py  Project: marcottelab/infer_complexes
def score_arr_ext(arr, species, ext_key):
    """
    Key_or_data: either a string matching one of the keys for ext data in
    config.py, or a tuple of (name,data) where data is a sequence of (id1, id2,
    score), and the sequence can be a generator.
    fnet_cols: list of columns or first 2 letters to include, eg ['HS','CE']
    """
    ext_file = ut.config()[ext_key]
    conv_dict = convdict_from_fname(species, ext_file)
    filename = ut.proj_path('fnet_path', ext_file)
    stored_names = fnet_names(ext_file) # None if only one data column.
    names = stored_names if stored_names else [ext_key]
    data_dict = load_net(ut.load_tab_file(filename))
    print 'External data file: %s; size: %s; cols: %s' % (ext_file,
            len(data_dict), len(names))
    score_arr(arr, species, names, data_dict, conv_dict)
Code Example #51
File: Admin.py  Project: llimllib/cherry-blossom
    def add_story(self, story_title="", story_body="", filename=""):
        if story_title == "" or story_body == "" or filename == "":
            raise cpy.InternalRedirect("ls")

        filename = os.path.join(config('datadir'), filename)
        if not filename.endswith('.txt'): filename += '.txt'
        
        if os.path.isfile(filename): 
            ns = {'title': 'File Already Exists', 'filename': filename}
            return [self.navbar(ns), ('admin_story_already_exists', ns)]

        try:
            f = open(filename, 'w')
            f.write(story_title + "\n")
            f.write(htmlunescape(story_body))
        except Exception, e:
            cpy.log("unable to log: " + e.Message)
Code Example #52
File: Admin.py  Project: llimllib/cherry-blossom
    def edit(self, filename):
        try:
            #XXX: are we sure that filename can't ref previous dirs? ../ didn't
            #       work in my basic test, but how should we better sanitize 
            #       this?
            fullname = os.path.join(config("datadir"), filename)
            f = file(fullname)
        except IOError:
            cpy.log("Unable to open file %s for editing" % fullname)
            return
        title = "Editing file %s" % filename
        story_title = f.readline()
        body = htmlescape(f.read())

        ns = locals()
        del ns['self']

        return [self.navbar(ns), ('admin_storyedit', ns)]
Code Example #53
File: blox.py  Project: llimllib/cherry-blossom
    def essays(self, offset=0):
        try:
            offset = int(offset)
        except ValueError:
            offset = 0
    
        ns = cpy.config.get('/').copy()

        datadir = config('datadir')
        essays = self.files(offset)

        ns['offset'] = offset
        if len(essays) == ns['num_entries']:
            ns['offset_next'] = offset + ns['num_entries']

        ns.update({'essays': essays,
                   'offset': offset,
                   'pagename': "essays"})
        return self.render((('head', ns), ('essays', ns), ('foot', ns)))
Code Example #54
File: i18n.py  Project: chdb/DhammaMap
def get_locale_from_accept_header(request, localeTags):
    """ Detect a locale from request.header 'Accept-Language'
    The locale with the highest quality factor (q) that most nearly matches our config.locales is returned.
    rh: webapp2.RequestHandler

    Note that in the future if all User Agents adopt the convention of sorting quality factors in descending order
    then the first can be taken without needing to parse or sort the accept header leading to increased performance.
    (see http://lists.w3.org/Archives/Public/ietf-http-wg/2012AprJun/0473.html)
    """
    header = request.headers.get("Accept-Language", '')
    parsed = parse_accept_language_header(header)
    if parsed is None:
        return None
    pairs_sorted_by_q = sorted(parsed.items(), key=lambda (lang, q): q, reverse=True)
    locale = Locale.negotiate( [lang for (lang, q) in pairs_sorted_by_q]
                             , u.config('locales')
                             , sep='_'
                             )
    return u.utf8(locale)
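The parse_accept_language_header helper is not part of this snippet. A minimal sketch of what such a parser might look like (hypothetical, not the project's implementation): it turns a header such as 'en-US,en;q=0.9,fr;q=0.8' into a dict of language tag to quality factor, returning None for an empty header.

def parse_accept_language_header(header):
    # Hypothetical sketch: map each language tag in an Accept-Language
    # header to its quality factor (default q=1.0 when omitted).
    if not header:
        return None
    result = {}
    for item in header.split(','):
        parts = item.strip().split(';')
        tag = parts[0].strip()
        q = 1.0
        for param in parts[1:]:
            param = param.strip()
            if param.startswith('q='):
                try:
                    q = float(param[2:])
                except ValueError:
                    q = 0.0
        if tag:
            result[tag] = q
    return result or None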
Code Example #55
File: score.py  Project: marcottelab/infer_complexes
def precalc_scores(scoref, dtype='f2'):
    """
    Also zero out the diagonal to more efficiently remove all self-interactions
    up-front.
    """
    # NOTE to change dtype you must change it in loadtxt below!!
    save_compact = ut.config()['save_compact_corrs'] 
    compactf = '%s.%s.pyd' % (scoref, dtype)
    if os.path.exists(compactf): 
        mat = ut.loadpy(compactf)
        inds = range(mat.shape[0]) # always square score matrix
        mat[inds, inds] = 0
        return mat
    else:
        ascores = np.loadtxt(scoref, dtype='f2')
        if save_compact:
            print 'saving compact', compactf
            ut.savepy(ascores, compactf)
        return ascores
Code Example #56
File: blox.py  Project: llimllib/cherry-blossom
    def default(self, *args, **kwargs):
        #allow a plugin to handle a default url if it wants; it needs to return 
        #a tuple (pagename, [Entry objects]) if it does 
        call_result = run_callback(self.plugins, 'cb_default', args) 
        if call_result != []: return self.render_page(call_result[1:], call_result[0])

        try:
            offset = int(kwargs.get('offset', 0))
        except ValueError:
            offset = 0

        l = len(args)
        if l <= len(self.timeformats):
            #check to see if args represent a date
            for fmt in self.timeformats[l-1]:
                try:
                    t = time.strptime(' '.join(args), fmt)
                    if "%Y" in fmt:
                        year = t[0]
                    else:
                        year = self.now().year
                    if "%m" in fmt or "%b" in fmt or "%B" in fmt:
                        month = t[1]
                    else:
                        month = None
                    if "%d" in fmt:
                        day = t[2]
                    else:
                        day = None
                    entries = FileCabinet.get_entries_by_date(year, month, day)
                    if entries:
                        entries = entries[offset:offset + config('num_entries')]
                        return self.render_page(entries, ' '.join(args), offset)
                except ValueError:
                    #not a date - move on
                    pass
        z = os.path.join(*args)
        fname = self.stripall(z, '.html', '.htm', '.txt')
        e = FileCabinet.get_one(fname, self.datadir)
        if e:
            return self.render_page([e])
        return self.error_page('Page Not Found', 404)
Code Example #57
File: i18n.py  Project: chdb/DhammaMap
 def getLocale(rh, tag):
     ##todo: review all this fn code
     """ Retrieve the locale tag from a prioritized list of sources
     NB We cannot return None because there has to be a locale - clearly UI text has to be in some language or other.
     """
     localeTags = u.config('locales')
     if localeTags: 
         # 1. use tag param
         if tag in localeTags:
             return tag
         # 2. retrieve locale tag from url query string
         tag = rh.request.get("hl", None)
         if tag:
             qs_items = rh.request.GET
             del qs_items['hl']  # remove the hl item from the qs - it has now been processed 
         if tag in localeTags:
             return tag
         # 3. retrieve locale tag from cookie
         tag = rh.request.cookies.get('hl', None)
         if tag in localeTags:
             return tag
         # 4. retrieve locale tag from accept language header
         tag = get_locale_from_accept_header(rh.request, localeTags)
         if tag:
             return tag
         # 5. detect locale tag from IP address location
         ctry = getRequestLocation(rh.request, 'Country')
         if ctry:
             tag = Locale.negotiate(ctry, localeTags)
             if tag:
                 return tag
         # 6. use the 1st member of localeTags
         tag = localeTags[0]
         if tag:
             return tag
         # 7. Use this locale if all attempts above have failed.
     return 'en' # NB 'en' is chosen simply because the string literals and comments in this app happen to be in English. Its not because of a bias.
Code Example #58
File: Rss.py  Project: llimllib/cherry-blossom
    def prepare_rss_template(self, entries):
        ns = cpy.config.get('/').copy()
        entry_structs = []
        for e in entries:
            #XXX: what exactly is the <guid> element?
            #XXX: what is the category tag? should keywords go here?
            es = EntryStruct()
            es.title = e.title

            #this callback gives any interested plugins the chance to change
            #the text of a story, as presented in a feed. It gives an Entry
            #object, and ignores any return value
            run_callback(self.parent.plugins, "cb_feed_story", e)

            #because <style> messed me up, I'm going to stop stripping
            #HTML out of my description. The RSS spec sucks.
            es.desc = e.text
            es.link = urljoin(config('base_url'), e.relpath + '.html')
            es.relpath = e.relpath
            es.time = time.strftime('%Y-%m-%dT%H:%M:%SZ', e.time_tuple)
            es.text = e.text
            entry_structs.append(es)
        ns['entries'] = entry_structs
        return ('rss', ns)
Code Example #59
File: Rss.py  Project: llimllib/cherry-blossom
 def keyword_rss(self, kw):
     num_entries = config('num_entries', 10)
     entries = get_entries_by_meta('keywords')
     entries = [e for e in entries if kw in e.metadata['keywords']]
     return self.prepare_rss_template(entries[:num_entries])
Code Example #60
            else:
                print benchmark, 'price not retrieved, page not saved'

        else:
            # Timed out
            print benchmark, "timed out"
            pass

    return None


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Pricebot -- update wiki page")
    parser.add_argument('-q', '--quiet', action='store_true',
                        default=False, help='Quiet non-error output')
    args = parser.parse_args()

    config = config(quiet=args.quiet)

    # Now do the work!
    url = config.setting['pricebot']['wiki_url']
    httpuser = config.setting['pricebot']['username']
    httppass = config.setting['pricebot']['password']

    site = setup(url, httpuser, httppass, config)

    # Then check if the bot is disabled, and act accordingly
    if bot_status(site) == 1:
        set_exchange_rates(site)
        set_crude_prices(site)