Example #1
 def __init__(self,
              root,
              nCls,
              splits_path,
              split,
              img_size,
              apply_transform=True):
     self.apply_transform = apply_transform
     self.img_size = (img_size, img_size)
     self.nCls = nCls
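     # per-channel BGR means (Caffe/VGG-style); presumably subtracted from images during preprocessing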
     self.mean = np.array([104.00699, 116.66877, 122.67892])
     self.image_files = []
     self.label_files = []
     self.root = root
     with open(os.path.join(splits_path, '{}.txt'.format(split))) as f:
         lines = f.readlines()
     filenames = [l.strip() for l in lines]
     N = len(filenames)
     pbar = InitBar()
     print('Loading image and label filenames...\n')
     for i in range(N):
         pbar(100.0 * float(i) / float(N))
         image, label = filenames[i].split()
         self.image_files.append(root + image)
         self.label_files.append(root + label)
Example #2
def create_covoting_matrix(session, member_map):
    n_members = len(member_map)

    covoting_matrix = []
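    # covoting_matrix[i][j] tallies how often members i and j cast the same vs. different votes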
    for i in range(n_members):
        covoting_matrix.append([])
        for j in range(n_members):
            covoting_matrix[i].append({'vote_equal': 0, 'vote_different': 0})

    num_polls = session.query(Poll).count()

    pbar = InitBar(title="Parsing Votes")
    pbar(0)

    for i, (poll_id,) in enumerate(session.query(Poll.id)):
        votes = session.query(Vote).filter(Vote.poll_id == poll_id).filter(
            Vote.vote_option != 'Frånvarande').all()  # 'Frånvarande' = absent
        for vote1 in votes:
            for vote2 in votes:
                if vote1.member_id in member_map and vote2.member_id in member_map:
                    if vote1.vote_option == vote2.vote_option:
                        covoting_matrix[member_map[vote1.member_id]][
                            member_map[vote2.member_id]]['vote_equal'] += 1
                    else:
                        covoting_matrix[member_map[vote1.member_id]][
                            member_map[vote2.member_id]]['vote_different'] += 1

        pbar((100.0 * i) / num_polls)
    return covoting_matrix
Example #3
def load_dataset():
    # load training data
    training_images = []
    training_labels = []
    print('Loading training data:')
    bar = InitBar()
    for i in range(NUM_CLASSES):
        bar(100.0 * float(i) / float(NUM_CLASSES))
        pathname = os.path.join(datapath, 'mnist_png', 'training', str(i))
        for fname in os.listdir(pathname):
            fullfname = os.path.join(pathname, fname)
            img = cv2.imread(fullfname, 0)
            training_images.append(img)
            training_labels.append(i)
    training_images = np.stack(training_images, axis=0)
    training_labels = np.array(training_labels)
    print('training data has the shape of {}\n'.format(training_images.shape))

    # load test data
    test_images = []
    test_labels = []
    print('Loading test data:')
    for i in range(NUM_CLASSES):
        bar(100.0 * float(i) / float(NUM_CLASSES))
        pathname = os.path.join(datapath, 'mnist_png', 'testing', str(i))
        for fname in os.listdir(pathname):
            img = cv2.imread(os.path.join(pathname, fname), 0)
            test_images.append(img)
            test_labels.append(i)
    test_images = np.stack(test_images, axis=0)
    test_labels = np.array(test_labels)
    print('test data has the shape of {}\n'.format(test_images.shape))

    return (training_images, training_labels, test_images, test_labels)
Example #4
def populate_user(db, user_count, verbose=True):
    """
    This function create `user_count` number of random user in database
    :param db: a handler to the DBConnection object
    :param user_count: an integer value with number of new users
    """
    import names
    from progress_bar import InitBar

    if verbose:
        print("Starting creating users")
    if user_count > MAX_UNIQUE_USER_COUNT:
        raise ValueError("Cannot generate more than %d unique users" %
                         MAX_UNIQUE_USER_COUNT)

    users = []
    created = 0
    progress_bar = InitBar(size=user_count, stream=sys.stdout)

    unique_identifier = set()
    for ind in range(user_count):
        # generate random identifier
        identifier = names.get_first_name()
        attempt = 0
        while identifier in unique_identifier:
            identifier = names.get_first_name()
            attempt += 1
            if attempt > user_count:
                raise RuntimeError(
                    "Cannot find a unique name. Please choose a smaller number of records"
                )

        unique_identifier.add(identifier)
        user = {
            'identifier': identifier,
            'bb_username': identifier,
            'bb_password': '******'
        }
        result = yield db.users.createUser(user)
        if result:
            created += 1
        users.append(user)
        if verbose:
            progress_bar(ind + 1)

    if verbose:
        print()
        print("Created %d new users, %d skipped" %
              (created, user_count - created))

    defer.returnValue(map(lambda x: x['identifier'], users))
Example #5
    def synthesis(self):
        """
        synthesis
        """
        number_FFT = np.array([self.numFFT, self.numFFT], dtype=np.int0)
        INF = np.inf * np.ones([self.numFFT, 1])
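        # infinite sentinel column: padding the pattern with it below lets the diff-based search find minima at the edges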
        min_current = 10 ** (-np.abs(self.excitation_ratio) / 20.)
        m = 0
        print("执行阵列综合开始,祈祷不报错".center(25, "-"))

        for trial in np.arange(1, self.max_trial + 1):
            msg_1 = "随机生成阵列激励第" + str(trial) + "次"
            # initial current space
            self.current = np.random.random(size=self.numberUV) * self.array_mask
            # progress bar
            pbar = InitBar(title=msg_1)
            for iteration in np.arange(1, self.max_iteration + 1):
                # Normalized the AF
                af_space = ifftshift(ifft2(self.current, number_FFT))
                af_abs = np.abs(af_space)
                max_ind = np.unravel_index(af_abs.argmax(), af_abs.shape)
                maxAF = af_abs.max()
                af_abs = af_abs / maxAF
                af_space = af_space / maxAF

                # Find all FF nulls
                min_val = np.sign(np.diff(np.hstack([INF, af_abs, INF])))
                min_ind = np.where(np.diff(min_val + (min_val == 0)) == 2)

                # Find all FF peaks
                peak_ind = np.diff(min_val) < 0
                indP = np.argsort(af_abs[peak_ind])[::-1]

                # Find indices all SLL directions

                # Adapt AF to SLL constrains

                current = fft2(ifftshift(af_space))
                # Truncate current
                self.current = abs(current[0:self.numberUV[0], 0:self.numberUV[1]]) * self.array_mask
                self.current = self.current / np.max(self.current)
                select = self.current > 0
                self.current[select & (self.current < min_current)] = min_current

                pbar(iteration / self.max_iteration * 100)

            del pbar

        print("执行结束,万幸".center(25, "-"))
Example #6
 def __init__(self, root, img_size, apply_transform=True):
     self.apply_transform = apply_transform
     self.img_size = img_size
     self.image_files = []
     self.label_files = []
     self.root = root
     with open(os.path.join(root, 'ImageSets/CLS-LOC/train_cls.txt')) as f:
         lines = f.readlines()
     filenames = [l.strip() for l in lines]
     N = len(filenames)
     pbar = InitBar()
     print('Loading imagenet filenames...\n')
     for i in range(N):
         pbar(100.0 * float(i) / float(N))
         image = filenames[i].split()[0]
         self.image_files.append(root + '/' + image + '.JPEG')
Example #7
def download_from_Gdrive(gservice, this_file):
    try:
        #download current file into memory
        download_request = gservice.files().get_media(fileId=this_file['id'])
        readbyte = io.BytesIO()  # in-memory stream that receives the download (requires `import io`)
        downloader = MediaIoBaseDownload(readbyte, download_request)
        done = False
        pbar = InitBar('Downloading: '+ this_file['name'])
        while done is False:
            status, done = downloader.next_chunk()
            pbar(int(status.progress() * 100))
        del pbar
        # tempbyte.close()
        return readbyte
    except Exception as e:
        log.error('Error in downloading file from gdrive: ' + this_file['name'])
        log.error('\n'+str(e))
Example #8
def load_dataset(part, SZ):
    # SZ: the output size of the model, to which the target label maps are shrunk
    # load training data
    images = []
    labels = []
    data_list_path = os.path.join(cwd, 'datasetVOC')
    dataset_path = '/home/monaj/bin/VOCdevkit/VOC2012'
    print('Loading {} data:'.format(part))
    with open(os.path.join(data_list_path, '{}.txt'.format(part))) as f:
        lines = f.readlines()
    filenames = [l.strip() for l in lines]
    N = len(filenames)
    pbar = InitBar()
    for i in range(N):
        pbar(100.0 * float(i) / float(N))
        image_path, label_path = filenames[i].split()
        full_image_path = dataset_path + image_path
        full_label_path = dataset_path + label_path
        image = cv2.resize(cv2.imread(full_image_path), (H, W),
                           interpolation=cv2.INTER_CUBIC)
        label = cv2.resize(cv2.imread(full_label_path, 0),
                           SZ,
                           interpolation=cv2.INTER_NEAREST)
        images.append(image)
        labels.append(label)
    mean_image = np.stack([IMG_MEAN[0] * np.ones((H, W)),
                           IMG_MEAN[1] * np.ones((H, W)),
                           IMG_MEAN[2] * np.ones((H, W))], axis=-1)
    mean_batch = np.expand_dims(mean_image, axis=0)
    images = np.stack(images, axis=0)
    images = images.astype('float32')
    images -= mean_batch
    labels = np.stack(labels, axis=0)
    print('\n\n{} data was loaded with a shape of {}'.format(
        part, images.shape))
    print('{} label was loaded with a shape of {}'.format(part, labels.shape))

    return (images, labels)
Example #9
def main():
    ################################################
    ########## Preparing the dataset ###############
    ################################################
    training_images, training_labels = load_dataset('train_limited', (40,40)) # The output of this resnet model is (40,40)
    val_images, val_labels = load_dataset('val', (H,W))
    training_labels = training_labels.astype('float32')
    val_labels = val_labels.astype('float32')
    training_labels[training_labels==255] = -1
    val_labels[val_labels==255] = -1

    N = training_labels.shape[0]
    Nv = val_labels.shape[0]
    perm_train = np.random.permutation(N)

    training_images = training_images[perm_train, :, :, :]
    training_labels = training_labels[perm_train, :, :]

    training_labels = np.reshape(training_labels, (N, 40*40, 1))
    #val_labels = np.reshape(val_labels, (Nv, H*W, 1))

    ################################################
    ######## Building the model ####################
    ################################################
    model = resnet50_deeplab()

    """
    #################################################################
    ### Exporting the trained weights into a dictionary
    ### This piece of code is run in a TensorFlow program where we have the trained parameters.
    #################################################################
    voc_trained_weights = {}
    for v in tf.trainable_variables():
        wname = v.name.encode('ascii','ignore')
        wname = wname.replace('weights','kernel')
        wname = wname.replace('biases','bias')
        voc_trained_weights[wname] = sess.run(v)

    for v in tf.model_variables():
        wname = v.name.encode('ascii','ignore')
        voc_trained_weights[wname] = sess.run(v)

    np.save('voc_trained_weights.npy', voc_trained_weights)
    #################################################################
    """

    #################################################################
    ### Writing the already trained and saved parameters into the keras weights
    #################################################################
    
    voc_trained_weights = np.load('voc_trained_weights.npy')[()]
    for l in model.layers:
        print('{:<20s}: {:<25s} -> {}'.format(l.name, l.input_shape,l.output_shape))
        trainable_weights = l.trainable_weights
        if not trainable_weights:
            continue
        len_w = len(trainable_weights)
        old_weights = l.get_weights()
        weights = []
        for i in range(len_w):
            wname = trainable_weights[i].name.encode('ascii','ignore')
            weights.append(voc_trained_weights[wname])
        if len(old_weights)>2:
            wnames = wname.split('/')
            wname = wnames[0] + '/' + 'moving_mean:0'
            weights.append(voc_trained_weights[wname])
            wname = wnames[0] + '/' + 'moving_variance:0'
            weights.append(voc_trained_weights[wname])
        l.set_weights(weights)
    print('Trained VOC12 weights were loaded into the Keras model.')
    
    ################################################################
    ### Test Results
    ################################################################
    print('\nValidation step:\n')
    BATCH_SIZE = 2
    #accuracy = model.evaluate(x=val_images, y=val_labels, batch_size=BATCH_SIZE)
    #print('\n')
    #print('Keras test score = {}'.format(accuracy))
    #print('\n')

    #################################################################
    ### Sample visualization
    #################################################################
    MULTI_SCALE = 0
    conf_mat_total = np.zeros((NUM_CLASSES,NUM_CLASSES))
    pbar = InitBar()
    for i in range(Nv):
        pbar(100.0*float(i)/float(Nv))
        img = val_images[i]
        """ Strangely single-scale works a little better.
            I have even tried multi-scale with merging in (40,40) resolution
            and then resizing the result, but even though it boosted
            the performance 0.2%, it was still lower than the Single-scale architecture. """
        if not MULTI_SCALE:
            prediction100 = model.predict(np.expand_dims(img, axis=0))
            prediction    = np.reshape(prediction100, (H*W, NUM_CLASSES))
        else:
            img075 = cv2.resize(img, (int(0.75*H),int(0.75*W)), cv2.INTER_CUBIC)
            img050 = cv2.resize(img, (int(0.50*H),int(0.50*W)), cv2.INTER_CUBIC)
            prediction100 = model.predict(np.expand_dims(img, axis=0))
            prediction075 = model.predict(np.expand_dims(img075, axis=0))
            prediction050 = model.predict(np.expand_dims(img050, axis=0))

            prediction100 = np.reshape(prediction100, (H*W, NUM_CLASSES))
            prediction075 = np.reshape(prediction075, (H*W, NUM_CLASSES))
            prediction050 = np.reshape(prediction050, (H*W, NUM_CLASSES))
            prediction = np.maximum(np.maximum(prediction100, prediction075), prediction050)

        prediction = np.argmax(prediction, axis=-1)
        pred_img = np.reshape(prediction, (H,W))
        gt_img = val_labels[i]
        gt = gt_img[gt_img>=0]
        pred = pred_img[gt_img>=0]
        conf_mat = confusion_matrix(gt, pred, labels=list(range(NUM_CLASSES))) 
        conf_mat_total += conf_mat

    ious = np.zeros((NUM_CLASSES,1))
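    # per-class IoU: true positives / (ground-truth pixels + predicted pixels - true positives)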
    for l in range(NUM_CLASSES):
        ious[l] = conf_mat_total[l,l] / (np.sum(conf_mat_total[l,:]) +
                                         np.sum(conf_mat_total[:,l]) -
                                         conf_mat_total[l,l])
    
    print(ious)
    print('Mean IOU = {}\n'.format(np.mean(ious)))
Example #10
from sqlalchemy import create_engine, func
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound
from demokratikollen.core.utils import postgres as pg_utils
import os
import psycopg2 as pg
from progress_bar import InitBar
# Note: PolledPoint, Vote, Member and Party are the project's ORM model classes, imported elsewhere.

# Connect to SQLAlchemy db and create structure
engine = create_engine(pg_utils.engine_url())
session = sessionmaker()
session.configure(bind=engine)
s = session()

# Party votes
num_party_votes = 0
num_polls = s.query(func.count(PolledPoint.id)).scalar()
pbar = InitBar(title="Computing party votes")
pbar(0)
n = 0
for polled_point in s.query(PolledPoint):
    n = n + 1
    if n % 10 == 0:
        pbar(100 * n / num_polls)


    votes = s.query(Vote).join(Member).join(Party) \
        .filter(Vote.polled_point_id == polled_point.id) \
        .order_by(Party.id)

    current_party_id = 0
    for vote in votes:
        if current_party_id != vote.member.party_id:
Example #11
def main():
    """Shows basic usage of the Google Drive API.

    Creates a Google Drive API service object and outputs the names and IDs
    for up to 10 files.
    """

    log_filename = os.path.join(
        args.log_dir,
        'google-drive-to-s3-{}.log'.format(os.path.basename(time.strftime('%Y%m%d-%H%M%S')))
    )

    # register some logging handlers
    log_handler = FileHandler(
        log_filename,
        mode='w',
        level=args.log_level,
        bubble=True
    )
    stdout_handler = StreamHandler(sys.stdout, level=args.log_level, bubble=True)

    with stdout_handler.applicationbound():
        with log_handler.applicationbound():
            log.info("Arguments: {}".format(args))
            start = time.time()
            log.info("starting at {}".format(time.strftime('%l:%M%p %Z on %b %d, %Y')))

            credentials = get_credentials()
            http = credentials.authorize(httplib2.Http())
            drive_service = discovery.build('drive', 'v3', http=http)

            s3 = boto3.resource('s3')

            # load up a match file if we have one.
            if args.match_file:
                with open(args.match_file, 'r') as f:
                    match_filenames = f.read().splitlines()
            else:
                match_filenames = None

            # get the files in the specified folder.
            files = drive_service.files()
            request = files.list(
                pageSize=args.page_size,
                q="'{}' in parents".format(args.folder_id),
                fields="nextPageToken, files(id, name)"
            )

            # make sure our S3 Key prefix has a trailing slash
            key_prefix = ensure_trailing_slash(args.key_prefix)

            page_counter = 0
            file_counter = 0
            while request is not None:
                file_page = request.execute(http=http)
                page_counter += 1
                page_file_counter = 0  # reset the paging file counter

                # determine the page at which to start processing.
                if page_counter >= args.start_page:
                    log.info(u"######## Page {} ########".format(page_counter))

                    for this_file in file_page['files']:
                        file_counter += 1
                        page_file_counter += 1
                        if we_should_process_this_file(this_file['name'], match_filenames):
                            log.info(u"#== Processing {} file number {} on page {}. {} files processed.".format(
                                this_file['name'],
                                page_file_counter,
                                page_counter,
                                file_counter
                            ))

                            # download the file
                            download_request = drive_service.files().get_media(fileId=this_file['id'])
                            fh = io.BytesIO()  # Using an in memory stream location
                            downloader = MediaIoBaseDownload(fh, download_request)
                            done = False
                            pbar = InitBar(this_file['name'])
                            while done is False:
                                status, done = downloader.next_chunk()
                                pbar(int(status.progress()*100))
                                # print("\rDownload {}%".format(int(status.progress() * 100)))
                            del pbar

                            # upload to bucket
                            log.info(u"Uploading to S3")
                            s3.Bucket(args.bucket).put_object(
                                Key="{}{}".format(key_prefix, this_file['name']),
                                Body=fh.getvalue(),
                                ACL='public-read'
                            )
                            log.info(u"Uploaded to S3")
                            fh.close()  # close the file handle to release memory
                        else:
                            log.info(u"Do not need to process {}".format(this_file['name']))

                # stop if we have come to the last user specified page
                if args.end_page and page_counter == args.end_page:
                    log.info(u"Finished paging at page {}".format(page_counter))
                    break
                # request the next page of files
                request = files.list_next(request, file_page)

            log.info("Running time: {}".format(str(datetime.timedelta(seconds=(round(time.time() - start, 3))))))
            log.info("Log written to {}:".format(log_filename))
Example #12
def main():
    training_images, training_labels, test_images, test_labels = load_dataset()

    # plt.imshow(training_images[:,:,0], cmap='gray')
    # plt.show()

    perm_train = np.random.permutation(training_labels.size)
    training_labels = training_labels[perm_train]
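    # shift and scale training images to [-1, 1] so real samples match the generator's tanh output range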
    training_images = (training_images[perm_train, :, :] - 127.5) / 127.5
    training_images = np.expand_dims(training_images, -1)
    print(training_images.shape)
    test_images = test_images / 255.0
    test_images = np.expand_dims(test_images, -1)

    # pdb.set_trace()

    #    training_labels = to_categorical(training_labels, NUM_CLASSES)
    #    test_labels = to_categorical(test_labels, NUM_CLASSES)

    BATCH_SIZE = 32 * 8
    WIDTH, HEIGHT = 28, 28
    adam_lr = 0.0002
    adam_beta_1 = 0.5

    #####################################
    ### Defining the Discriminator:
    #####################################
    input_D = Input(shape=(HEIGHT, WIDTH, 1), name='input_D')
    x = Conv2D(filters=32,
               kernel_size=3,
               strides=(2, 2),
               padding='same',
               name='conv1_D')(input_D)
    #x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(filters=32,
               kernel_size=3,
               strides=(2, 2),
               padding='same',
               name='conv2_D')(x)
    #x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Flatten()(x)
    x = Dense(128, activation='relu', name='dense1_D')(x)
    output_D = Dense(1, activation='sigmoid', name='output_D')(x)
    model_D = Model(inputs=input_D, outputs=output_D)
    model_D.compile(loss='binary_crossentropy',
                    optimizer=tf.train.AdamOptimizer(learning_rate=adam_lr,
                                                     beta1=adam_beta_1),
                    metrics=['accuracy'])

    #####################################
    ### Defining the Generator:
    #####################################
    LATENT_SIZE = 100
    input_G = Input(shape=(LATENT_SIZE, ), name='input_gen')
    x = Dense(7 * 7 * 32, activation='linear', name='Dense1_G')(input_G)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Reshape((7, 7, 32))(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(filters=32,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               name='conv1_gen')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(filters=32,
               kernel_size=3,
               strides=(1, 1),
               padding='same',
               name='conv2_gen')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(filters=1,
               kernel_size=1,
               strides=(1, 1),
               padding='same',
               name='conv3_gen')(x)
    img_G = Activation('tanh')(x)
    model_G = Model(inputs=input_G, outputs=img_G)
    model_G.compile(loss='binary_crossentropy',
                    optimizer=tf.train.AdamOptimizer(learning_rate=adam_lr,
                                                     beta1=adam_beta_1))

    #####################################
    ### Defining the Combined GAN:
    #####################################
    model_D.trainable = False  # model_D is already compiled, so the discriminator itself stays trainable,
    # but inside this combined model it is treated as non-trainable
    input_main = Input(
        shape=(LATENT_SIZE, ), name='input_main'
    )  # Note that this input should be different from the input to Generator
    combined = Model(inputs=input_main, outputs=model_D(model_G(input_main)))
    combined.compile(loss='binary_crossentropy',
                     optimizer=tf.train.AdamOptimizer(learning_rate=adam_lr,
                                                      beta1=adam_beta_1),
                     metrics=['accuracy'])

    print(combined.summary())

    #####################################
    ### Training:
    #####################################
    bar = InitBar()
    N = training_images.shape[0]
    for iter in range(100):
        fake_input = np.random.randn(1, LATENT_SIZE)
        fake_image = model_G.predict(fake_input)
        loss_G, acc_G, loss_D, acc_D = 0, 0, 0, 0
        steps = int(np.ceil(float(N) / float(BATCH_SIZE)))
        for batch_iter in range(steps):
            bar(100.0 * batch_iter / float(steps))
            real_image, _ = get_batch(batch_iter, BATCH_SIZE // 2,
                                      training_images, training_labels)
            ####################
            ## Discriminator Training
            ####################
            #  Note that if using BN layer in Discriminator, minibatch should contain only real images or fake images.
            fake_input = np.random.randn(BATCH_SIZE // 2, LATENT_SIZE)
            fake_image = model_G.predict(fake_input)
            #real_image = get_real_mbatch(batch_sz=BATCH_SIZE/2, data=training_images)
            agg_input = np.concatenate((fake_image, real_image), axis=0)
            agg_output = np.zeros((BATCH_SIZE, ))
            agg_output[BATCH_SIZE // 2:] = 1
            perm = np.random.permutation(BATCH_SIZE)
            agg_input = agg_input[perm]
            agg_output = agg_output[perm]
            #pdb.set_trace()
            tr = model_D.train_on_batch(x=agg_input, y=agg_output)
            loss_D += tr[0]
            acc_D += tr[1]
            #####################
            ## Generator Training
            #####################
            fake_input = np.random.randn(BATCH_SIZE, LATENT_SIZE)
            fake_label = np.ones(BATCH_SIZE, )
            tr = combined.train_on_batch(x=fake_input, y=fake_label)
            loss_G += tr[0]
            acc_G += tr[1]
        print('\nG_loss = {}, G_acc = {}\nD_loss = {}, D_acc = {}'.format(
            loss_G / float(steps), acc_G / float(steps), loss_D / float(steps),
            acc_D / float(steps)))

    for iter in range(10):
        fake_input = np.random.randn(1, LATENT_SIZE)
        fake_image = model_G.predict(fake_input)
        plt.imshow(fake_image[0, :, :, 0])
        plt.show()
Example #13
        continue
    if organ.lower()=='eun':
        continue

    committee = s.query(Committee).filter(func.lower(Committee.abbr)==organ.lower()).one()
    s.add(CommitteeReport(
            dok_id=dok_id,
            published=publ,
            session=rm,
            code=bet,
            title=titel,
            text_url=dok_url,
            committee=committee))
# s.commit()

pbar = InitBar(title="Adding votes")
pbar(0)
polls = {}
c.execute("SELECT COUNT(*) FROM votering WHERE avser='sakfrågan'")
num_votes = c.fetchone()[0]
c_named = source_conn.cursor("named")
c_named.itersize = 50000
c_named.execute("SELECT votering_id,intressent_id,beteckning,rm,punkt,rost,datum FROM votering WHERE avser='sakfrågan' ORDER BY votering_id")
for i,(votering_id,intressent_id,beteckning,rm,punkt,rost,datum) in enumerate(c_named):
    if votering_id not in polls:
        date = datum.date()
        polls[votering_id] = PolledPoint(poll_date=date,r_votering_id=votering_id,number=punkt)
        s.add(polls[votering_id])
    if i % 1000 == 0:
        add_status = 100*i/num_votes
        pbar(add_status)
Example #14
def mission_to(sday, smonth, syear, fovmul, rfov, bfov, porbs, showsc, showac, tbody, vbody, folder, projectname):
    today = (time.strftime("%d/%m/%Y"))
    print " + Mission started "+sday+"/"+smonth+"/"+syear+""
    print " + Today is", today

    start = date(int(syear), int(smonth), int(sday))
    now = date(int(time.strftime("%Y")), int(time.strftime("%m")), int(time.strftime("%d")))

    delta = now - start
    print "Spacecraft have been in space for "+str(delta.days)+" days\n"

    if (projectname == "voyagerI") or (projectname == "voyagerII"):
        start = date(1980, 01, 02)
        now = date(int(time.strftime("%Y")), int(time.strftime("%m")), int(time.strftime("%d")))
        delta = now - start
        print "----------------------------------------------"
        print "NOTE FOR VOYAGER I/II: NASA Simulator doesn't\n" \
              "include full images range since 1977, only\n" \
              "starting from 01/02/1980"
        print "----------------------------------------------"

    check_folder(folder)
    headers = createheaders()

    alldelta = now - start
    max = str(delta.days)
    pbar = InitBar()

    print "Downloading images...\n"
    time.sleep(1)
    for i in range(alldelta.days + 1):
        date_to_parse = start + timedelta(days=i)
        url = make_url(date_to_parse, tbody, vbody, fovmul, rfov, bfov, porbs, showsc, showac)
        filename = make_filename(folder, projectname, "above", date_to_parse, tbody, vbody)
        current = int((int(i) * 100)/int(max))
        #print i, current, max
        pbar(current)


        # Check if file already exists on system
        if not os.path.exists(filename):
            try:
                #print "Downloading image: ", url
                req = urllib2.Request(url, None, headers)
                f = open(filename,'wb')
                f.write(urllib2.urlopen(req).read())
                f.close()
            except:
                print "\nError on downloading the image:"
                print " ==> ", filename
                print " ==> ", url

    del pbar
    print "\n\nAll images are now on filesystem!"
    print "\n✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰✰"

    movie = raw_input('Do you want me to make a movie from those images: (y/n)')
    if movie == "y":
        print "Let's make a video from the images. This process may take some time depending on your computer..."
        make_video_from_folder(folder, projectname)
    else:
        print "No problem, you can resume this action by running this program again."
        sys.exit()
Example #15
def populate_build(db,
                   build_count,
                   builders_list,
                   projects,
                   user_names,
                   verbose=True):
    """
    :param db: a handler to the DBConnection object
    :param build_count: an integer value with number of new builds
    :param builders_list: a list of builders. The builder is a BuilderConfig object
    :param projects: a list of a ProjectConfig objects
    :param user_names: a list of an usernames (identifier) from the database
    :param verbose: a boolean value indicate to print all information to std output
    """
    from progress_bar import InitBar

    def handler(result, counter, *args):
        result[counter] += 1

    progress_bar = InitBar(size=build_count, stream=sys.stdout)

    if verbose:
        print("Starting creating builds")
    res = {
        'created': 0,
        'skipped': 0,
    }

    for number in range(build_count):
        builder = random.choice(builders_list)
        codebases = random.choice(projects[builder.project].codebases)
        codebase = random.choice(codebases.keys())
        repository = codebases[codebase]
        submitted_at = datetime2epoch(
            datetime.datetime.now() + datetime.timedelta(
                seconds=random.randint(-3 * 60 * 60, -3 * 60 * 60)))
        complete_at = submitted_at + random.randint(60 * 60, 3 * 60 * 60)
        build = {
            'branch':
            repository['branch'],
            'revision':
            "%032x" % random.getrandbits(160),  # Random sha-1 hash
            'repository':
            repository['repository'],
            'codebase':
            codebase,
            'project':
            builder.project,
            'reason':
            'A build was forced by {username} {username}@localhost'.format(
                username=random.choice(user_names)),
            'submitted_at':
            submitted_at,
            'complete_at':
            complete_at,
            'buildername':
            builder.name,
            'slavepool':
            None,
            'number':
            number,
            'slavename':
            random.choice(builder.slavenames),
            'results':
            random.choice(COMPLETED_RESULTS),
        }
        promise = db.builds.createFullBuildObject(**build)
        promise.addCallback(lambda *args: handler(res, 'created'))
        promise.addErrback(lambda *args: handler(res, 'skipped'))
        yield promise

        if verbose:
            progress_bar(number + 1)

    if verbose:
        print()
        print("Created %d new builds, %d skipped" %
              (res['created'], res['skipped']))