I'm trying to build a video thumbnailer using GST-Python; it looks like this:

from __future__ import division

import sys
import logging
import pdb

_log = logging.getLogger(__name__)
logging.basicConfig()
_log.setLevel(logging.DEBUG)

try:
    import gobject
    gobject.threads_init()
except ImportError:
    raise Exception('gobject could not be found')

try:
    import pygst
    pygst.require('0.10')
    import gst
    from gst import pbutils
    from gst.extend import discoverer
except ImportError:
    raise Exception('gst/pygst 0.10 could not be found')

class VideoThumbnailer:
    '''
        Creates a video thumbnail

     - Sets up discoverer & transcoding pipeline.
       Discoverer finds out information about the media file
     - Launches gobject.MainLoop, this triggers the discoverer to start running
     - Once the discoverer is done, it calls the __discovered callback function
     - The __discovered callback function launches the transcoding process
     - The _on_message callback is called from the transcoding process until it
       gets a message of type gst.MESSAGE_EOS, then it calls __stop which shuts
       down the gobject.MainLoop
    '''

    def __init__(self, src, dst, **kwargs):
        _log.info('Initializing VideoThumbnailer...')

        # Set instance variables
        self.loop = gobject.MainLoop()
        self.source_path = src
        self.destination_path = dst
        self.destination_dimensions = kwargs.get('dimensions') or (180, 180)

        if not isinstance(self.destination_dimensions, tuple):
            raise Exception('dimensions must be tuple: (width, height)')

        # Run setup
        self._setup()
        # Run.
        self._run()

    def _setup(self):
        self._setup_pipeline()
        self._setup_discover()

    def _run(self):
        _log.info('Discovering...')
        self.discoverer.discover()
        _log.info('Done')

        _log.debug('Initializing MainLoop()')
        self.loop.run()

    def _setup_discover(self):
        self.discoverer = discoverer.Discoverer(self.source_path)

        # Connect self.__discovered to the 'discovered' event
        self.discoverer.connect('discovered', self.__discovered)

    def __discovered(self, data, is_media):
        '''
        Callback for media discoverer.
        '''
        if not is_media:
            self.__stop()
            raise Exception('Could not discover {0}'.format(self.source_path))

        _log.debug('__discovered, data: {0}'.format(data))

        self.data = data

        # Run any tasks that depend on the info from the discovery
        self._on_discovered()

        # Tell the transcoding pipeline to start running
        self.pipeline.set_state(gst.STATE_PLAYING)
        _log.info('Transcoding...')

    def _on_discovered(self):
        self.__setup_capsfilter()

    def _setup_pipeline(self):
        # Create a new pipeline
        self.pipeline = gst.Pipeline('VideoThumbnailerPipeline')

        # Create the elements in the pipeline
        self.filesrc = gst.element_factory_make('filesrc', 'filesrc')
        self.filesrc.set_property('location', self.source_path)
        self.pipeline.add(self.filesrc)

        self.decoder = gst.element_factory_make('decodebin2', 'decoder')
        self.decoder.connect('new-decoded-pad', self._on_dynamic_pad)
        self.pipeline.add(self.decoder)

        self.ffmpegcolorspace = gst.element_factory_make(
            'ffmpegcolorspace', 'ffmpegcolorspace')
        self.pipeline.add(self.ffmpegcolorspace)

        self.videoscale = gst.element_factory_make('videoscale', 'videoscale')
        self.videoscale.set_property('method', 'bilinear')
        self.pipeline.add(self.videoscale)

        self.capsfilter = gst.element_factory_make('capsfilter', 'capsfilter')
        self.pipeline.add(self.capsfilter)

        self.jpegenc = gst.element_factory_make('jpegenc', 'jpegenc')
        self.pipeline.add(self.jpegenc)

        self.filesink = gst.element_factory_make('filesink', 'filesink')
        self.filesink.set_property('location', self.destination_path)
        self.pipeline.add(self.filesink)

        # Link all the elements together
        self.filesrc.link(self.decoder)
        self.ffmpegcolorspace.link(self.videoscale)
        self.videoscale.link(self.capsfilter)
        self.capsfilter.link(self.jpegenc)
        self.jpegenc.link(self.filesink)

        self._setup_bus()

    def _on_dynamic_pad(self, dbin, pad, islast):
        '''
        Callback called when ``decodebin2`` has a pad that we can connect to
        '''
        # Intersect the capabilities of the video sink and the pad src
        # Then check if they have common capabilities.
        if not self.ffmpegcolorspace.get_pad_template('sink')\
                .get_caps().intersect(pad.get_caps()).is_empty():
            # It IS a video src pad.
            pad.link(self.ffmpegcolorspace.get_pad('sink'))

    def _setup_bus(self):
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message', self._on_message)

    def __setup_capsfilter(self):
        caps = ['video/x-raw-rgb']

        if self.data.videoheight > self.data.videowidth:
            # Whoa! We have ourselves a portrait video!
            caps.append('height={0}'.format(
                    self.destination_dimensions[1]))
        else:
            # It's a landscape, phew, how normal.
            caps.append('width={0}'.format(
                    self.destination_dimensions[0]))

        self.capsfilter.set_property(
            'caps',
            gst.caps_from_string(
                ', '.join(caps)))

    def _on_message(self, bus, message):
        _log.debug((bus, message))

        t = message.type

        if t == gst.MESSAGE_EOS:
            self.__stop()
            _log.info('Done')
        elif t == gst.MESSAGE_ERROR:
            _log.error((bus, message))
            self.__stop()

    def __stop(self):
        _log.debug(self.loop)

        self.pipeline.set_state(gst.STATE_NULL)

        gobject.idle_add(self.loop.quit)
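
Instantiating the class does everything (it sets up the pipeline and runs the main loop), so a minimal invocation looks something like this ('video.ogv' and 'thumb.jpg' are placeholder paths):

    # Minimal usage sketch -- paths are placeholders
    VideoThumbnailer('video.ogv', 'thumb.jpg', dimensions=(180, 180))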

What it does

  1. filesrc loads the video file
  2. decodebin2 decodes the video file and its video src pad gets linked to the ffmpegcolorspace sink
  3. ffmpegcolorspace does whatever it needs to with the colorspace of the video stream
  4. videoscale scales the video
  5. capsfilter tells videoscale to fit the video within a 180x180 box
  6. jpegenc captures a single frame
  7. filesink saves the JPEG file
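
To make the graph easier to follow, this should be (I believe) roughly the same pipeline written as a gst.parse_launch() string -- just a sketch with placeholder file names, not the code I actually run:

    # The bare caps in the middle stand in for the capsfilter element
    pipeline = gst.parse_launch(
        'filesrc location=video.ogv ! decodebin2 ! ffmpegcolorspace ! '
        'videoscale method=bilinear ! video/x-raw-rgb,width=180 ! '
        'jpegenc ! filesink location=thumb.jpg')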

What I want it to do

  1. filesrc loads the video file
  2. decodebin2 decodes the video file and its video src pad gets linked to the ffmpegcolorspace sink
  3. ffmpegcolorspace does whatever it needs to with the colorspace of the video stream
  4. videoscale scales the video
  5. capsfilter tells videoscale to fit the video within a 180x180 box
  6. jpegenc captures a single frame 30% into the video (see the sketch below)
  7. filesink saves the JPEG file
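
In other words, the frame should come from roughly 30% into the clip. Assuming the discoverer's videolength attribute is the clip duration in nanoseconds (that is my reading of gst.extend.discoverer, but I have not verified it), the target position would be something like:

    # 30% into the clip, in nanoseconds (videolength in ns is an assumption)
    target = int(self.data.videolength * 30 / 100)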

What I have tried

    self.decoder.seek_simple(
        gst.FORMAT_PERCENT,
        gst.SEEK_FLAG_FLUSH,
        self.WADSWORTH_CONSTANT) # int(30)

I placed this in _on_dynamic_pad, after the linking, sadly to no avail.
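
Two things I suspect, but have not confirmed: that a seek issued from new-decoded-pad is too early (the pipeline has not prerolled yet), and that gst.FORMAT_PERCENT positions need to be scaled by GST_FORMAT_PERCENT_SCALE (10000, if I am reading gstformat.h right) rather than passed as a plain 0-100 integer. This is roughly what I was planning to try next -- a sketch only, reusing the existing _on_message handler and the videolength assumption above:

    # Inside _on_message, in addition to the existing EOS/ERROR handling.
    # The getattr guard is just to make sure we only seek once.
    if message.type == gst.MESSAGE_ASYNC_DONE \
            and not getattr(self, '_seek_done', False):
        self._seek_done = True
        # The pipeline has prerolled, so a seek should be possible now.
        # Seek the whole pipeline to 30% of the duration the discoverer
        # reported (videolength in nanoseconds is an assumption).
        self.pipeline.seek_simple(
            gst.FORMAT_TIME,
            gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_ACCURATE,
            int(self.data.videolength * 30 / 100))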

Any help?