Frage

Ich versuche, einen Miniaturanbeller mit GST-Python zu bauen, es sieht so aus.

from __future__ import division

import sys
import logging
import pdb

_log = logging.getLogger(__name__)
logging.basicConfig()
_log.setLevel(logging.DEBUG)

try:
    import gobject
    gobject.threads_init()
except:
    raise Exception('gobject could not be found')

try:
    import pygst
    pygst.require('0.10')
    import gst
    from gst import pbutils
    from gst.extend import discoverer
except:
    raise Exception('gst/pygst 0.10 could not be found')

class VideoThumbnailer:
    '''
        Creates a video thumbnail

     - Sets up discoverer & transcoding pipeline.
       Discoverer finds out information about the media file
     - Launches gobject.MainLoop, this triggers the discoverer to start running
     - Once the discoverer is done, it calls the __discovered callback function
     - The __discovered callback function launches the transcoding process
     - The _on_message callback is called from the transcoding process until it
       gets a message of type gst.MESSAGE_EOS, then it calls __stop which shuts
       down the gobject.MainLoop
    '''

    def __init__(self, src, dst, **kwargs):
        _log.info('Initializing VideoThumbnailer...')

        # Set instance variables
        self.loop = gobject.MainLoop()
        self.source_path = src
        self.destination_path = dst
        self.destination_dimensions = kwargs.get('dimensions') or (180, 180)

        if not type(self.destination_dimensions) == tuple:
            raise Exception('dimensions must be tuple: (width, height)')

        # Run setup
        self._setup()
        # Run.
        self._run()

    def _setup(self):
        self._setup_pipeline()
        self._setup_discover()

    def _run(self):
        _log.info('Discovering...')
        self.discoverer.discover()
        _log.info('Done')

        _log.debug('Initializing MainLoop()')
        self.loop.run()

    def _setup_discover(self):
        self.discoverer = discoverer.Discoverer(self.source_path)

        # Connect self.__discovered to the 'discovered' event
        self.discoverer.connect('discovered', self.__discovered)

    def __discovered(self, data, is_media):
        '''
        Callback for media discoverer.
        '''
        if not is_media:
            self.__stop()
            raise Exception('Could not discover {0}'.format(self.source_path))

        _log.debug('__discovered, data: {0}'.format(data))

        self.data = data

        # Run any tasks that depend on the info from the discovery
        self._on_discovered()

        # Tell the transcoding pipeline to start running
        #self.pipeline.set_state(gst.STATE_PLAYING)
        _log.info('Transcoding...')

    def _on_discovered(self):
        self.__setup_capsfilter()

    def _setup_pipeline(self):
        # Create a new pipeline
        self.pipeline = gst.Pipeline('VideoThumbnailerPipeline')

        # Create the elements in the pipeline
        self.filesrc = gst.element_factory_make('filesrc', 'filesrc')
        self.filesrc.set_property('location', self.source_path)
        self.pipeline.add(self.filesrc)

        self.decoder = gst.element_factory_make('decodebin2', 'decoder')
        self.decoder.connect('new-decoded-pad', self._on_dynamic_pad)
        self.pipeline.add(self.decoder)

        self.ffmpegcolorspace = gst.element_factory_make(
            'ffmpegcolorspace', 'ffmpegcolorspace')
        self.pipeline.add(self.ffmpegcolorspace)

        self.videoscale = gst.element_factory_make('videoscale', 'videoscale')
        self.videoscale.set_property('method', 'bilinear')
        self.pipeline.add(self.videoscale)

        self.capsfilter = gst.element_factory_make('capsfilter', 'capsfilter')
        self.pipeline.add(self.capsfilter)

        self.jpegenc = gst.element_factory_make('jpegenc', 'jpegenc')
        self.pipeline.add(self.jpegenc)

        self.filesink = gst.element_factory_make('filesink', 'filesink')
        self.filesink.set_property('location', self.destination_path)
        self.pipeline.add(self.filesink)

        # Link all the elements together
        self.filesrc.link(self.decoder)
        self.ffmpegcolorspace.link(self.videoscale)
        self.videoscale.link(self.capsfilter)
        self.capsfilter.link(self.jpegenc)
        self.jpegenc.link(self.filesink)

        self._setup_bus()

    def _on_dynamic_pad(self, dbin, pad, islast):
        '''
        Callback called when ``decodebin2`` has a pad that we can connect to
        '''
        # Intersect the capabilities of the video sink and the pad src
        # Then check if they have common capabilities.
        if not self.ffmpegcolorspace.get_pad_template('sink')\
                .get_caps().intersect(pad.get_caps()).is_empty():
            # It IS a video src pad.
            pad.link(self.ffmpegcolorspace.get_pad('sink'))

    def _setup_bus(self):
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message', self._on_message)

    def __setup_capsfilter(self):
        caps = ['video/x-raw-rgb']

        if self.data.videoheight > self.data.videowidth:
            # Whoa! We have ourselves a portrait video!
            caps.append('height={0}'.format(
                    self.destination_dimensions[1]))
        else:
            # It's a landscape, phew, how normal.
            caps.append('width={0}'.format(
                    self.destination_dimensions[0]))

        self.capsfilter.set_property(
            'caps',
            gst.caps_from_string(
                ', '.join(caps)))

    def _on_message(self, bus, message):
        _log.debug((bus, message))

        t = message.type

        if t == gst.MESSAGE_EOS:
            self.__stop()
            _log.info('Done')
        elif t == gst.MESSAGE_ERROR:
            _log.error((bus, message))
            self.__stop()

    def __stop(self):
        _log.debug(self.loop)

        self.pipeline.set_state(gst.STATE_NULL)

        gobject.idle_add(self.loop.quit)

Was es macht

  1. DateiRC lädt eine Videodatei
  2. Decodebin2 Demuxiert die Videodatei und verbindet das Video -SRC -PAD mit dem FFMPegColorSpace -Speak
  3. ffmpegcolorSpace macht alles mit den Farbräumen des Videostreams
  4. Videoscale skaliert das Video
  5. CapsFilter fordert Videoscale an
  6. JPeGenc erfasst einen einzelnen Rahmen
  7. DateiSink speichert die JPEG -Datei

Was ich möchte, dass es tut

  1. DateiRC lädt eine Videodatei
  2. Decodebin2 Demuxiert die Videodatei und verbindet das Video -SRC -PAD mit dem FFMPegColorSpace -Speak
  3. ffmpegcolorSpace macht alles mit den Farbräumen des Videostreams
  4. Videoscale skaliert das Video
  5. CapsFilter fordert Videoscale an
  6. JPeGenc erfasst einen einzelnen Rahmen Bei 30% in das Video
  7. DateiSink speichert die JPEG -Datei

Ich habe es versucht mit

    self.decoder.seek_simple(
        gst.FORMAT_PERCENT,
        gst.SEEK_FLAG_FLUSH,
        self.WADSWORTH_CONSTANT) # int(30)

platziert in _on_dynamic_pad, nach dem Pad -Verknüpfen, leider ohne Erfolg.

War es hilfreich?
Lizenziert unter: CC-BY-SA mit Zuschreibung
Nicht verbunden mit StackOverflow
scroll top