Code example #1
	def build_srcMenu( self, *a ):
		'''builds the right-click menu for the source list - rebuilt from scratch each time it is posted'''
		cmd.setParent( a[ 0 ], menu=True )
		cmd.menu( a[ 0 ], e=True, dai=True )  #dai=deleteAllItems: clear the menu before rebuilding

		cmd.menuItem( l='Add Selected Objects', c=self.on_addSrc )
		cmd.menuItem( l='Replace With Selected Objects', c=self.on_replaceSrc )
		cmd.menuItem( d=True )
		cmd.menuItem( l='Remove Highlighted Item', c=self.on_removeSrcItem )
		cmd.menuItem( d=True )
		cmd.menuItem( l='Select All Objects', c=self.on_selectAllSrc )
		cmd.menuItem( d=True )
		cmd.menuItem( l='Save Mapping...', c=self.on_saveMapping )
		cmd.menuItem( l='Load Mapping...', sm=True )  #sub-menu listing all saved mapping presets
		pm = PresetManager( TOOL_NAME, EXT )
		presets = pm.listAllPresets( True )

		for loc in LOCAL, GLOBAL:
			for f in presets[ loc ]:
				f = Path( f )
				cmd.menuItem( l=f.name(), c=utils.Callback( self.loadMappingFile, f ) )

		cmd.menuItem( d=True )
		cmd.menuItem( l='Manage Mappings...', c=lambda *x: presetsUI.load( TOOL_NAME, LOCAL, EXT ) )

		cmd.setParent( '..', menu=True )  #step back out of the Load Mapping sub-menu
		cmd.menuItem( d=True )
		cmd.menuItem( l='Swap Mapping', c=self.on_swap )
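The method reads the posted menu's name from a[ 0 ], which matches how Maya passes the menu (and its parent) to a popup menu's postMenuCommand. A minimal hook-up sketch; srcList and tool are placeholder names, not part of the original:

srcList = cmd.textScrollList()
cmd.popupMenu( p=srcList, pmc=tool.build_srcMenu )  #menu gets rebuilt every time it is shown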
Code example #2
    def set_effects(self, kwargs: dict):
        """Normalize the four pre-/post-process effect entries into CircleLists."""
        self.effects = {}
        # default: one do-nothing callback per frame
        no_effects_list = create_empty_callbacks_cl(self.frames_count)

        for item in ('preprocess_first', 'preprocess_second',
                     'postprocess_first', 'postprocess_second'):

            value = kwargs.get(item, no_effects_list)

            print(f'@@ set_effects!! item={item} of type={type(value)}')

            # an ImageTravel *class* gets instantiated before being wrapped
            if isclass(value):  # inspect.isclass
                # noinspection PyTypeChecker
                if issubclass(value, ImageTravel):
                    value: ImageTravel = value(
                        image=self.im1,
                        frames_count=self.frames_count,
                        frame_resolution=utils.get_image_size(self.im1))

            # an ImageTravel *instance* gets wrapped in a single-entry CircleList
            if isinstance(value, ImageTravel):
                print('TRANSITION: if isinstance(value, ImageTravel):')
                cb = utils.Callback(fun_ptr=value.next_frame,
                                    needs_params=('frame_index', 'im1'))

                value: utils.CircleList = utils.CircleList(
                    list_or_tuple=[cb], max_iterations=self.frames_count)

            # a plain list/tuple of callbacks is wrapped as well
            if isinstance(value, (list, tuple)):
                value: utils.CircleList = utils.CircleList(
                    list_or_tuple=value, max_iterations=self.frames_count)

            if not isinstance(value, utils.CircleList):
                raise TypeError(f'{item}: expected a CircleList-compatible value, got {type(value)}')

            value.max_iterations = self.frames_count
            self.effects[item] = value
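A usage sketch based only on the branches above; slideshow, my_effect and SomeImageTravel are placeholder names:

slideshow.set_effects({
    'preprocess_first': [my_effect],        # a list is wrapped into a CircleList
    'postprocess_second': SomeImageTravel,  # an ImageTravel subclass is instantiated, then wrapped
})
# 'preprocess_second' and 'postprocess_first' fall back to the empty-callback default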
Code example #3
    def __init__(
            self,
            image_sources: List[utils.image_types],
            transitions_list: Optional[List[Type[trans.Transition]]] = None,
            frame_resolution=(1920, 1080),
            **params):

        """
        Wizard slideshow
        :param image_sources:
        :param transitions_list:
        :param frame_resolution:
        # :param total_slide_duration:
        :keyword params:
            :key fps: 24 or 25
            :key slide_seconds: = 10
            :key frame_file_type: 'jpg' or 'png', other value or key unset gives 'jpg' by default
        """

        '''
        [1] Check number of image sources, compare it to number of transitions
        [2] Check FPS, frame_resolution, calculate important data for rendering slides
        [3] Assign images to timeline, calculate transitions timing
        '''

        if len(image_sources) < 2:
            raise ValueError('a slideshow needs at least two image sources')

        # default: one BlendingTransition between each pair of consecutive images
        if not transitions_list:
            transitions_list = [trans.BlendingTransition] * (len(image_sources) - 1)

        # never keep more transitions than there are gaps between images
        if len(transitions_list) >= len(image_sources):
            transitions_list = transitions_list[:len(image_sources) - 1]

        # forward the constructor arguments (plus **params) up the chain, minus self
        super().__init__(**{k: v for k, v in {**params, **locals()}.items() if k != 'self'})

        self.frame_resolution = frame_resolution
        self.global_frame_index = -1

        fft = params.get('frame_file_type', 'jpg')
        if fft not in ('jpg', 'png'):
            fft = 'jpg'

        self.frame_file_type = fft
        self.image_handlers: List[ImageHandler] = []

        # total timeline length in frames: every slide plus its transition window
        self.total_slide_duration = len(self.image_sources) * self.fps * (self.slide_seconds + self.transition_seconds)

        # spread the source images evenly across the timeline
        step = self.total_slide_duration // len(self.image_sources)
        for pos, next_source in enumerate(self.image_sources):
            self.image_handlers.append(
                ImageHandler(
                    full_path=next_source,
                    image_type=numpy.ndarray,
                    resolution=frame_resolution,
                    frame_indexes=[step * pos]
                )
            )

        for handler in self.image_handlers:
            print(' > handler.indexes: ', handler.get_frame_indexes())

        image_picker_cb = utils.Callback(fun_ptr=self.pick_image_for_frame, needs_params=('frame_index',))

        # TIMELINE CONTROLLER - MAIN CONTROL OF SELECTING IMAGE FOR FRAME
        self.slide_frames = ClipUtils.KeyedEffectsStack(
            ClipUtils.StillImage(
                frames_count=self.total_slide_duration,
                image=image_picker_cb
            ),
        )

        '''
         >> For each 500 frames do Zoom in & zoom out.
         >> To do so, we need some calculations and later assign it to slide frame controller
        '''

        travel_duration = (self.slide_seconds + self.transition_seconds) * self.fps
        travellers_circlelist = Travels.get_random_travellers_circlelist(
            travels_count=len(self.image_sources), debug=True)

        print('\t\ttravellers_circlelist: ', travellers_circlelist)

        self.images_traveller_controller: List[Dict] = []

        for next_index in range(0, self.total_slide_duration, travel_duration):
            self.images_traveller_controller.append({
                'start': next_index,
                'stop': next_index + travel_duration,
                'traveller': travellers_circlelist.next()
            })
            print('>>> images_traveller_controller: ', self.images_traveller_controller[-1])

        # position of start, end, selected transition type
        self.slide_transitions_timing: List[Dict] = []

        trans_len = len(self.transitions_list)
        self.one_trans_len = params.get('transition')  #  (params['slide_seconds'] * self.fps) // 4
        # self.one_slide_len = self.total_slide_duration // len(self.image_sources)

        for nr, next_trans in enumerate(self.transitions_list):
            start = (nr + 1) * (self.slide_seconds + self.transition_seconds) * self.fps
            stop = start + self.transition_seconds * self.fps
            self.slide_transitions_timing.append({
                'nr': nr,
                'start': start,
                'stop': stop,
                'transition': next_trans
            })
            # print(f'***]]] added {nr} -> {next_trans}')

        print('slide transitions timings:')
        for stt in self.slide_transitions_timing:
            print('>> timing: ', stt)

        print('\nWIZARD __init__ LOCALS()')
        for k, v in locals().items():
            print('\t' + k, ' -> ', v)
        print('WIZARD __init__ ENDS HERE\n')
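A construction sketch following the signature and keywords documented above, assuming the class is named Wizard as its docstring suggests; the file names are placeholders:

show = Wizard(
    image_sources=['slides/a.jpg', 'slides/b.jpg', 'slides/c.jpg'],
    transitions_list=None,        # defaults to BlendingTransition between consecutive slides
    frame_resolution=(1280, 720),
    fps=25,
    slide_seconds=10,
    frame_file_type='png',
)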
Code example #4
def run(args: argparse.Namespace) -> None:
    torch.manual_seed(args.seed)

    dataset, hg, train_idx, valid_idx, test_idx = utils.process_dataset(
        args.dataset,
        root=args.dataset_root,
    )
    predict_category = dataset.predict_category
    labels = hg.nodes[predict_category].data['labels']

    training_device = torch.device('cuda' if args.gpu_training else 'cpu')
    inference_device = torch.device('cuda' if args.gpu_inference else 'cpu')

    inference_mode = args.inference_mode

    # per-layer neighbor fanouts, e.g. '25,20' -> [25, 20]
    fanouts = [int(fanout) for fanout in args.fanouts.split(',')]

    # mini-batch neighbor sampling over the training split
    train_sampler = dgl.dataloading.MultiLayerNeighborSampler(fanouts)
    train_dataloader = dgl.dataloading.NodeDataLoader(
        hg,
        {predict_category: train_idx},
        train_sampler,
        batch_size=args.batch_size,
        shuffle=True,
        drop_last=False,
        num_workers=args.num_workers,
    )

    if inference_mode == 'neighbor_sampler':
        valid_sampler = dgl.dataloading.MultiLayerNeighborSampler(fanouts)
        valid_dataloader = dgl.dataloading.NodeDataLoader(
            hg,
            {predict_category: valid_idx},
            valid_sampler,
            batch_size=args.eval_batch_size,
            shuffle=False,
            drop_last=False,
            num_workers=args.eval_num_workers,
        )

        if args.test_validation:
            test_sampler = dgl.dataloading.MultiLayerNeighborSampler(fanouts)
            test_dataloader = dgl.dataloading.NodeDataLoader(
                hg,
                {predict_category: test_idx},
                test_sampler,
                batch_size=args.eval_batch_size,
                shuffle=False,
                drop_last=False,
                num_workers=args.eval_num_workers,
            )
    else:
        valid_dataloader = None

        if args.test_validation:
            test_dataloader = None

    in_feats = hg.nodes[predict_category].data['feat'].shape[-1]
    out_feats = dataset.num_classes

    num_nodes = {}
    node_feats = {}

    for ntype in hg.ntypes:
        num_nodes[ntype] = hg.num_nodes(ntype)
        node_feats[ntype] = hg.nodes[ntype].data.get('feat')

    activations = {'leaky_relu': F.leaky_relu, 'relu': F.relu}

    embedding_layer = RelGraphEmbedding(hg, in_feats, num_nodes, node_feats)
    model = EntityClassify(
        hg,
        in_feats,
        args.hidden_feats,
        out_feats,
        args.num_bases,
        args.num_layers,
        norm=args.norm,
        layer_norm=args.layer_norm,
        input_dropout=args.input_dropout,
        dropout=args.dropout,
        activation=activations[args.activation],
        self_loop=args.self_loop,
    )

    loss_function = nn.CrossEntropyLoss()

    embedding_optimizer = torch.optim.SparseAdam(
        embedding_layer.node_embeddings.parameters(), lr=args.embedding_lr)

    if args.node_feats_projection:
        all_parameters = chain(model.parameters(),
                               embedding_layer.embeddings.parameters())
        model_optimizer = torch.optim.Adam(all_parameters, lr=args.model_lr)
    else:
        model_optimizer = torch.optim.Adam(model.parameters(),
                                           lr=args.model_lr)

    # tracks per-epoch metrics, keeps the best model state and drives early stopping
    checkpoint = utils.Callback(args.early_stopping_patience,
                                args.early_stopping_monitor)

    print('## Training started ##')

    for epoch in range(args.num_epochs):
        train_time, train_loss, train_accuracy = train(
            embedding_layer,
            model,
            training_device,
            embedding_optimizer,
            model_optimizer,
            loss_function,
            labels,
            predict_category,
            train_dataloader,
        )
        valid_time, valid_loss, valid_accuracy = validate(
            embedding_layer,
            model,
            inference_device,
            inference_mode,
            loss_function,
            hg,
            labels,
            predict_category=predict_category,
            dataloader=valid_dataloader,
            eval_batch_size=args.eval_batch_size,
            eval_num_workers=args.eval_num_workers,
            mask=valid_idx,
        )

        checkpoint.create(
            epoch,
            train_time,
            valid_time,
            train_loss,
            valid_loss,
            train_accuracy,
            valid_accuracy,
            {
                'embedding_layer': embedding_layer,
                'model': model
            },
        )

        print(f'Epoch: {epoch + 1:03} '
              f'Train Loss: {train_loss:.2f} '
              f'Valid Loss: {valid_loss:.2f} '
              f'Train Accuracy: {train_accuracy:.4f} '
              f'Valid Accuracy: {valid_accuracy:.4f} '
              f'Train Epoch Time: {train_time:.2f} '
              f'Valid Epoch Time: {valid_time:.2f}')

        if checkpoint.should_stop:
            print('## Training finished: early stopping ##')

            break
        elif epoch >= args.num_epochs - 1:
            print('## Training finished ##')

    print(f'Best Epoch: {checkpoint.best_epoch} '
          f'Train Loss: {checkpoint.best_epoch_train_loss:.2f} '
          f'Valid Loss: {checkpoint.best_epoch_valid_loss:.2f} '
          f'Train Accuracy: {checkpoint.best_epoch_train_accuracy:.4f} '
          f'Valid Accuracy: {checkpoint.best_epoch_valid_accuracy:.4f}')

    if args.test_validation:
        print('## Test data validation ##')

        embedding_layer.load_state_dict(
            checkpoint.best_epoch_model_parameters['embedding_layer'])
        model.load_state_dict(checkpoint.best_epoch_model_parameters['model'])

        test_time, test_loss, test_accuracy = validate(
            embedding_layer,
            model,
            inference_device,
            inference_mode,
            loss_function,
            hg,
            labels,
            predict_category=predict_category,
            dataloader=test_dataloader,
            eval_batch_size=args.eval_batch_size,
            eval_num_workers=args.eval_num_workers,
            mask=test_idx,
        )

        print(f'Test Loss: {test_loss:.2f} '
              f'Test Accuracy: {test_accuracy:.4f} '
              f'Test Epoch Time: {test_time:.2f}')
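run() reads everything from the parsed arguments. A partial parser sketch whose flag names mirror the attribute accesses above; all defaults here are assumptions:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--dataset', type=str, default='ogbn-mag')       # assumed default
parser.add_argument('--dataset-root', type=str, default='./datasets')
parser.add_argument('--gpu-training', action='store_true')
parser.add_argument('--gpu-inference', action='store_true')
parser.add_argument('--inference-mode', type=str, default='neighbor_sampler')
parser.add_argument('--fanouts', type=str, default='25,20')
parser.add_argument('--batch-size', type=int, default=1024)
parser.add_argument('--num-epochs', type=int, default=50)
# ...the remaining flags (eval_batch_size, num_workers, hidden_feats, etc.)
# follow the same pattern; argparse turns dashes into underscores on the Namespace

run(parser.parse_args())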
Code example #5
import animations.transitions_classes_OLD as tc

# preview disabled: an unconditional exit() here would make everything below dead code
# print(sk.show())
# exit()

# cycle through three PIL effects while each second image is processed
second = utils.CircleList([
    pil_effects.bold_contour_edges, pil_effects.sharpen_saturation,
    pil_effects.zoom_and_crop
])

tc.FirstImageHorizontalStretchTransition(image1="source_images/1.jpg",
                                         image2="source_images/3.jpg",
                                         frames_count=50,
                                         while_process_second=second).render()

edges_callback = utils.Callback(fun_ptr=pil_effects.bold_contour_edges)

circle_list = utils.CircleList(
    list_or_tuple=[
        pil_effects.sharpen_saturation,
        edges_callback,
    ],
    max_iterations=25)

tc.FirstImageZoomBlendTransition(image1="source_images/1.jpg",
                                 image2="source_images/3.jpg",
                                 frames_count=25,
                                 while_process_second=circle_list).render()

bt = tc.CompositeMaskedSaturationTransition(
    image1="source_images/1.jpg",
    image2="source_images/3.jpg",
    frames_count=25)  # frames_count assumed, mirroring the calls above
bt.render()