/*
* Copyright (C) 2010,2011,2012 Samuel Audet
*
* This file is part of JavaCV.
*
* JavaCV is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version (subject to the "Classpath" exception
* as provided in the LICENSE.txt file that accompanied this code).
*
* JavaCV is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JavaCV. If not, see <http://www.gnu.org/licenses/>.
*
*
* This file was derived from avcodec.h and other libavcodec include files from
* FFmpeg 0.11.1, which are covered by the following copyright notice:
*
* copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
package com.googlecode.javacv.cpp;
import com.googlecode.javacpp.BytePointer;
import com.googlecode.javacpp.FunctionPointer;
import com.googlecode.javacpp.IntPointer;
import com.googlecode.javacpp.LongPointer;
import com.googlecode.javacpp.Pointer;
import com.googlecode.javacpp.PointerPointer;
import com.googlecode.javacpp.ShortPointer;
import com.googlecode.javacpp.annotation.ByPtrPtr;
import com.googlecode.javacpp.annotation.ByRef;
import com.googlecode.javacpp.annotation.Cast;
import com.googlecode.javacpp.annotation.Const;
import com.googlecode.javacpp.annotation.MemberGetter;
import com.googlecode.javacpp.annotation.Name;
import com.googlecode.javacpp.annotation.Opaque;
import com.googlecode.javacpp.annotation.Platform;
import com.googlecode.javacpp.annotation.Properties;
import java.nio.Buffer;
import java.nio.ShortBuffer;
import static com.googlecode.javacpp.Loader.*;
import static com.googlecode.javacv.cpp.avutil.*;
/**
*
* @author Samuel Audet
*/
@Properties({
@Platform(define="__STDC_CONSTANT_MACROS", cinclude={
"<libavcodec/avcodec.h>", "<libavcodec/avfft.h>"},
includepath=genericIncludepath, linkpath=genericLinkpath, link={"avcodec@.54", "avutil@.51"}),
@Platform(value={"linux", "freebsd", "solaris", "sunos"}, cinclude={
"<libavcodec/vaapi.h>", "<libavcodec/vdpau.h>", "<libavcodec/xvmc.h>",
"<libavcodec/avcodec.h>", "<libavcodec/avfft.h>"}),
@Platform(value="windows", includepath=windowsIncludepath, cinclude={
"<DShow.h>", "<d3d9.h>", "<vmr9.h>", "<evr9.h>", "<libavcodec/dxva2.h>",
"<libavcodec/avcodec.h>", "<libavcodec/avfft.h>"},
linkpath=windowsLinkpath, preloadpath=windowsPreloadpath, preload="avcodec-54"),
@Platform(value="android", includepath=androidIncludepath, linkpath=androidLinkpath) })
public class avcodec {
// Load the native avutil library first (avcodec depends on it), then avcodec itself.
static { load(avutil.class); load(); }
/**
* @file
* external API header
*/
// #include <version.h>
/**
* @file
* @ingroup libavc
* Libavcodec version macros.
*/
// Version of the native libavcodec these bindings were generated against.
// Must match the library the @Platform link/preload entries above load (avcodec 54).
public static final int LIBAVCODEC_VERSION_MAJOR = 54;
public static final int LIBAVCODEC_VERSION_MINOR = 23;
public static final int LIBAVCODEC_VERSION_MICRO = 100;
public static final int LIBAVCODEC_VERSION_INT = AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR,
LIBAVCODEC_VERSION_MINOR,
LIBAVCODEC_VERSION_MICRO);
public static final String LIBAVCODEC_VERSION = AV_VERSION(LIBAVCODEC_VERSION_MAJOR,
LIBAVCODEC_VERSION_MINOR,
LIBAVCODEC_VERSION_MICRO);
public static final int LIBAVCODEC_BUILD = LIBAVCODEC_VERSION_INT;
public static final String LIBAVCODEC_IDENT = "Lavc" + LIBAVCODEC_VERSION;
/**
* @defgroup libavc Encoding/Decoding Library
* @{
*
* @defgroup lavc_decoding Decoding
* @{
* @}
*
* @defgroup lavc_encoding Encoding
* @{
* @}
*
* @defgroup lavc_codec Codecs
* @{
* @defgroup lavc_codec_native Native Codecs
* @{
* @}
* @defgroup lavc_codec_wrappers External library wrappers
* @{
* @}
* @defgroup lavc_codec_hwaccel Hardware Accelerators bridge
* @{
* @}
* @}
* @defgroup lavc_internal Internal
* @{
* @}
* @}
*
*/
/**
* @defgroup lavc_core Core functions/structures.
* @ingroup libavc
*
* Basic definitions, functions for querying libavcodec capabilities,
* allocating core structures, etc.
* @{
*/
/**
* Identify the syntax and semantics of the bitstream.
* The principle is roughly:
* Two decoders with the same ID can decode the same streams.
* Two encoders with the same ID can encode compatible streams.
* There may be slight deviations from the principle due to implementation
* details.
*
* If you add a codec ID to this list, add it so that
* 1. no value of a existing codec ID changes (that would break ABI),
* 2. Give it a value which when taken as ASCII is recognized uniquely by a human as this specific codec.
* This ensures that 2 forks can independently add CodecIDs without producing conflicts.
*/
public static final int // enum CodecID {
// These values mirror the native enum CodecID exactly; renumbering breaks ABI
// compatibility with libavcodec (see the note in the Javadoc above).
CODEC_ID_NONE = 0,
/* video codecs */
CODEC_ID_MPEG1VIDEO = 1,
CODEC_ID_MPEG2VIDEO = 2, ///< preferred ID for MPEG-1/2 video decoding
CODEC_ID_MPEG2VIDEO_XVMC = 3,
CODEC_ID_H261 = 4,
CODEC_ID_H263 = 5,
CODEC_ID_RV10 = 6,
CODEC_ID_RV20 = 7,
CODEC_ID_MJPEG = 8,
CODEC_ID_MJPEGB = 9,
CODEC_ID_LJPEG = 10,
CODEC_ID_SP5X = 11,
CODEC_ID_JPEGLS = 12,
CODEC_ID_MPEG4 = 13,
CODEC_ID_RAWVIDEO = 14,
CODEC_ID_MSMPEG4V1 = 15,
CODEC_ID_MSMPEG4V2 = 16,
CODEC_ID_MSMPEG4V3 = 17,
CODEC_ID_WMV1 = 18,
CODEC_ID_WMV2 = 19,
CODEC_ID_H263P = 20,
CODEC_ID_H263I = 21,
CODEC_ID_FLV1 = 22,
CODEC_ID_SVQ1 = 23,
CODEC_ID_SVQ3 = 24,
CODEC_ID_DVVIDEO = 25,
CODEC_ID_HUFFYUV = 26,
CODEC_ID_CYUV = 27,
CODEC_ID_H264 = 28,
CODEC_ID_INDEO3 = 29,
CODEC_ID_VP3 = 30,
CODEC_ID_THEORA = 31,
CODEC_ID_ASV1 = 32,
CODEC_ID_ASV2 = 33,
CODEC_ID_FFV1 = 34,
CODEC_ID_4XM = 35,
CODEC_ID_VCR1 = 36,
CODEC_ID_CLJR = 37,
CODEC_ID_MDEC = 38,
CODEC_ID_ROQ = 39,
CODEC_ID_INTERPLAY_VIDEO = 40,
CODEC_ID_XAN_WC3 = 41,
CODEC_ID_XAN_WC4 = 42,
CODEC_ID_RPZA = 43,
CODEC_ID_CINEPAK = 44,
CODEC_ID_WS_VQA = 45,
CODEC_ID_MSRLE = 46,
CODEC_ID_MSVIDEO1 = 47,
CODEC_ID_IDCIN = 48,
CODEC_ID_8BPS = 49,
CODEC_ID_SMC = 50,
CODEC_ID_FLIC = 51,
CODEC_ID_TRUEMOTION1 = 52,
CODEC_ID_VMDVIDEO = 53,
CODEC_ID_MSZH = 54,
CODEC_ID_ZLIB = 55,
CODEC_ID_QTRLE = 56,
CODEC_ID_SNOW = 57,
CODEC_ID_TSCC = 58,
CODEC_ID_ULTI = 59,
CODEC_ID_QDRAW = 60,
CODEC_ID_VIXL = 61,
CODEC_ID_QPEG = 62,
CODEC_ID_PNG = 63,
CODEC_ID_PPM = 64,
CODEC_ID_PBM = 65,
CODEC_ID_PGM = 66,
CODEC_ID_PGMYUV = 67,
CODEC_ID_PAM = 68,
CODEC_ID_FFVHUFF = 69,
CODEC_ID_RV30 = 70,
CODEC_ID_RV40 = 71,
CODEC_ID_VC1 = 72,
CODEC_ID_WMV3 = 73,
CODEC_ID_LOCO = 74,
CODEC_ID_WNV1 = 75,
CODEC_ID_AASC = 76,
CODEC_ID_INDEO2 = 77,
CODEC_ID_FRAPS = 78,
CODEC_ID_TRUEMOTION2 = 79,
CODEC_ID_BMP = 80,
CODEC_ID_CSCD = 81,
CODEC_ID_MMVIDEO = 82,
CODEC_ID_ZMBV = 83,
CODEC_ID_AVS = 84,
CODEC_ID_SMACKVIDEO = 85,
CODEC_ID_NUV = 86,
CODEC_ID_KMVC = 87,
CODEC_ID_FLASHSV = 88,
CODEC_ID_CAVS = 89,
CODEC_ID_JPEG2000 = 90,
CODEC_ID_VMNC = 91,
CODEC_ID_VP5 = 92,
CODEC_ID_VP6 = 93,
CODEC_ID_VP6F = 94,
CODEC_ID_TARGA = 95,
CODEC_ID_DSICINVIDEO = 96,
CODEC_ID_TIERTEXSEQVIDEO = 97,
CODEC_ID_TIFF = 98,
CODEC_ID_GIF = 99,
CODEC_ID_DXA = 100,
CODEC_ID_DNXHD = 101,
CODEC_ID_THP = 102,
CODEC_ID_SGI = 103,
CODEC_ID_C93 = 104,
CODEC_ID_BETHSOFTVID = 105,
CODEC_ID_PTX = 106,
CODEC_ID_TXD = 107,
CODEC_ID_VP6A = 108,
CODEC_ID_AMV = 109,
CODEC_ID_VB = 110,
CODEC_ID_PCX = 111,
CODEC_ID_SUNRAST = 112,
CODEC_ID_INDEO4 = 113,
CODEC_ID_INDEO5 = 114,
CODEC_ID_MIMIC = 115,
CODEC_ID_RL2 = 116,
CODEC_ID_ESCAPE124 = 117,
CODEC_ID_DIRAC = 118,
CODEC_ID_BFI = 119,
CODEC_ID_CMV = 120,
CODEC_ID_MOTIONPIXELS = 121,
CODEC_ID_TGV = 122,
CODEC_ID_TGQ = 123,
CODEC_ID_TQI = 124,
CODEC_ID_AURA = 125,
CODEC_ID_AURA2 = 126,
CODEC_ID_V210X = 127,
CODEC_ID_TMV = 128,
CODEC_ID_V210 = 129,
CODEC_ID_DPX = 130,
CODEC_ID_MAD = 131,
CODEC_ID_FRWU = 132,
CODEC_ID_FLASHSV2 = 133,
CODEC_ID_CDGRAPHICS = 134,
CODEC_ID_R210 = 135,
CODEC_ID_ANM = 136,
CODEC_ID_BINKVIDEO = 137,
CODEC_ID_IFF_ILBM = 138,
CODEC_ID_IFF_BYTERUN1 = 139,
CODEC_ID_KGV1 = 140,
CODEC_ID_YOP = 141,
CODEC_ID_VP8 = 142,
CODEC_ID_PICTOR = 143,
CODEC_ID_ANSI = 144,
CODEC_ID_A64_MULTI = 145,
CODEC_ID_A64_MULTI5 = 146,
CODEC_ID_R10K = 147,
CODEC_ID_MXPEG = 148,
CODEC_ID_LAGARITH = 149,
CODEC_ID_PRORES = 150,
CODEC_ID_JV = 151,
CODEC_ID_DFA = 152,
CODEC_ID_WMV3IMAGE = 153,
CODEC_ID_VC1IMAGE = 154,
CODEC_ID_UTVIDEO = 155,
CODEC_ID_BMV_VIDEO = 156,
CODEC_ID_VBLE = 157,
CODEC_ID_DXTORY = 158,
CODEC_ID_V410 = 159,
CODEC_ID_XWD = 160,
CODEC_ID_CDXL = 161,
CODEC_ID_XBM = 162,
CODEC_ID_ZEROCODEC = 163,
// IDs below use four-character big-endian tags so independent forks can add
// codecs without colliding with the sequential values above.
CODEC_ID_Y41P = MKBETAG('Y','4','1','P'),
CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'),
CODEC_ID_EXR = MKBETAG('0','E','X','R'),
CODEC_ID_AVRP = MKBETAG('A','V','R','P'),
CODEC_ID_G2M = MKBETAG( 0 ,'G','2','M'),
CODEC_ID_AVUI = MKBETAG('A','V','U','I'),
CODEC_ID_AYUV = MKBETAG('A','Y','U','V'),
CODEC_ID_V308 = MKBETAG('V','3','0','8'),
CODEC_ID_V408 = MKBETAG('V','4','0','8'),
CODEC_ID_YUV4 = MKBETAG('Y','U','V','4'),
/* various PCM "codecs" */
// Values mirror the native enum CodecID: audio codecs start at 0x10000 and
// increment by one in declaration order.
CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
CODEC_ID_PCM_S16LE = 0x10000,
CODEC_ID_PCM_S16BE = 0x10000 + 1,
CODEC_ID_PCM_U16LE = 0x10000 + 2,
CODEC_ID_PCM_U16BE = 0x10000 + 3,
CODEC_ID_PCM_S8 = 0x10000 + 4,
CODEC_ID_PCM_U8 = 0x10000 + 5,
CODEC_ID_PCM_MULAW = 0x10000 + 6,
CODEC_ID_PCM_ALAW = 0x10000 + 7,
CODEC_ID_PCM_S32LE = 0x10000 + 8,
CODEC_ID_PCM_S32BE = 0x10000 + 9,
CODEC_ID_PCM_U32LE = 0x10000 + 10,
CODEC_ID_PCM_U32BE = 0x10000 + 11,
CODEC_ID_PCM_S24LE = 0x10000 + 12,
CODEC_ID_PCM_S24BE = 0x10000 + 13,
CODEC_ID_PCM_U24LE = 0x10000 + 14,
CODEC_ID_PCM_U24BE = 0x10000 + 15,
CODEC_ID_PCM_S24DAUD = 0x10000 + 16,
CODEC_ID_PCM_ZORK = 0x10000 + 17,
CODEC_ID_PCM_S16LE_PLANAR = 0x10000 + 18,
CODEC_ID_PCM_DVD = 0x10000 + 19,
CODEC_ID_PCM_F32BE = 0x10000 + 20,
CODEC_ID_PCM_F32LE = 0x10000 + 21,
CODEC_ID_PCM_F64BE = 0x10000 + 22,
CODEC_ID_PCM_F64LE = 0x10000 + 23,
CODEC_ID_PCM_BLURAY = 0x10000 + 24,
CODEC_ID_PCM_LXF = 0x10000 + 25,
CODEC_ID_S302M = 0x10000 + 26,
// Fixed: was 0x10000 + 26, which collided with CODEC_ID_S302M. In FFmpeg's
// enum CodecID, PCM_S8_PLANAR immediately follows S302M and therefore has
// the next value, 0x10000 + 27.
CODEC_ID_PCM_S8_PLANAR = 0x10000 + 27,
/* various ADPCM codecs */
// Each family starts at its own base value, matching the native enum CodecID:
// ADPCM 0x11000, AMR 0x12000, RealAudio 0x13000, DPCM 0x14000, other audio
// 0x15000, subtitles 0x17000, attachments 0x18000.
CODEC_ID_ADPCM_IMA_QT = 0x11000,
CODEC_ID_ADPCM_IMA_WAV = 0x11000 + 1,
CODEC_ID_ADPCM_IMA_DK3 = 0x11000 + 2,
CODEC_ID_ADPCM_IMA_DK4 = 0x11000 + 3,
CODEC_ID_ADPCM_IMA_WS = 0x11000 + 4,
CODEC_ID_ADPCM_IMA_SMJPEG = 0x11000 + 5,
CODEC_ID_ADPCM_MS = 0x11000 + 6,
CODEC_ID_ADPCM_4XM = 0x11000 + 7,
CODEC_ID_ADPCM_XA = 0x11000 + 8,
CODEC_ID_ADPCM_ADX = 0x11000 + 9,
CODEC_ID_ADPCM_EA = 0x11000 + 10,
CODEC_ID_ADPCM_G726 = 0x11000 + 11,
CODEC_ID_ADPCM_CT = 0x11000 + 12,
CODEC_ID_ADPCM_SWF = 0x11000 + 13,
CODEC_ID_ADPCM_YAMAHA = 0x11000 + 14,
CODEC_ID_ADPCM_SBPRO_4 = 0x11000 + 15,
CODEC_ID_ADPCM_SBPRO_3 = 0x11000 + 16,
CODEC_ID_ADPCM_SBPRO_2 = 0x11000 + 17,
CODEC_ID_ADPCM_THP = 0x11000 + 18,
CODEC_ID_ADPCM_IMA_AMV = 0x11000 + 19,
CODEC_ID_ADPCM_EA_R1 = 0x11000 + 20,
CODEC_ID_ADPCM_EA_R3 = 0x11000 + 21,
CODEC_ID_ADPCM_EA_R2 = 0x11000 + 22,
CODEC_ID_ADPCM_IMA_EA_SEAD = 0x11000 + 23,
CODEC_ID_ADPCM_IMA_EA_EACS = 0x11000 + 24,
CODEC_ID_ADPCM_EA_XAS = 0x11000 + 25,
CODEC_ID_ADPCM_EA_MAXIS_XA = 0x11000 + 26,
CODEC_ID_ADPCM_IMA_ISS = 0x11000 + 27,
CODEC_ID_ADPCM_G722 = 0x11000 + 28,
CODEC_ID_ADPCM_IMA_APC = 0x11000 + 29,
/* AMR */
CODEC_ID_AMR_NB = 0x12000,
CODEC_ID_AMR_WB = 0x12000 + 1,
/* RealAudio codecs*/
CODEC_ID_RA_144 = 0x13000,
CODEC_ID_RA_288 = 0x13000 + 1,
/* various DPCM codecs */
CODEC_ID_ROQ_DPCM = 0x14000,
CODEC_ID_INTERPLAY_DPCM = 0x14000 + 1,
CODEC_ID_XAN_DPCM = 0x14000 + 2,
CODEC_ID_SOL_DPCM = 0x14000 + 3,
/* audio codecs */
CODEC_ID_MP2 = 0x15000,
CODEC_ID_MP3 = 0x15000 + 1, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
CODEC_ID_AAC = 0x15000 + 2,
CODEC_ID_AC3 = 0x15000 + 3,
CODEC_ID_DTS = 0x15000 + 4,
CODEC_ID_VORBIS = 0x15000 + 5,
CODEC_ID_DVAUDIO = 0x15000 + 6,
CODEC_ID_WMAV1 = 0x15000 + 7,
CODEC_ID_WMAV2 = 0x15000 + 8,
CODEC_ID_MACE3 = 0x15000 + 9,
CODEC_ID_MACE6 = 0x15000 + 10,
CODEC_ID_VMDAUDIO = 0x15000 + 11,
CODEC_ID_FLAC = 0x15000 + 12,
CODEC_ID_MP3ADU = 0x15000 + 13,
CODEC_ID_MP3ON4 = 0x15000 + 14,
CODEC_ID_SHORTEN = 0x15000 + 15,
CODEC_ID_ALAC = 0x15000 + 16,
CODEC_ID_WESTWOOD_SND1 = 0x15000 + 17,
CODEC_ID_GSM = 0x15000 + 18, ///< as in Berlin toast format
CODEC_ID_QDM2 = 0x15000 + 19,
CODEC_ID_COOK = 0x15000 + 20,
CODEC_ID_TRUESPEECH = 0x15000 + 21,
CODEC_ID_TTA = 0x15000 + 22,
CODEC_ID_SMACKAUDIO = 0x15000 + 23,
CODEC_ID_QCELP = 0x15000 + 24,
CODEC_ID_WAVPACK = 0x15000 + 25,
CODEC_ID_DSICINAUDIO = 0x15000 + 26,
CODEC_ID_IMC = 0x15000 + 27,
CODEC_ID_MUSEPACK7 = 0x15000 + 28,
CODEC_ID_MLP = 0x15000 + 29,
CODEC_ID_GSM_MS = 0x15000 + 30, /* as found in WAV */
CODEC_ID_ATRAC3 = 0x15000 + 31,
CODEC_ID_VOXWARE = 0x15000 + 32,
CODEC_ID_APE = 0x15000 + 33,
CODEC_ID_NELLYMOSER = 0x15000 + 34,
CODEC_ID_MUSEPACK8 = 0x15000 + 35,
CODEC_ID_SPEEX = 0x15000 + 36,
CODEC_ID_WMAVOICE = 0x15000 + 37,
CODEC_ID_WMAPRO = 0x15000 + 38,
CODEC_ID_WMALOSSLESS = 0x15000 + 39,
CODEC_ID_ATRAC3P = 0x15000 + 40,
CODEC_ID_EAC3 = 0x15000 + 41,
CODEC_ID_SIPR = 0x15000 + 42,
CODEC_ID_MP1 = 0x15000 + 43,
CODEC_ID_TWINVQ = 0x15000 + 44,
CODEC_ID_TRUEHD = 0x15000 + 45,
CODEC_ID_MP4ALS = 0x15000 + 46,
CODEC_ID_ATRAC1 = 0x15000 + 47,
CODEC_ID_BINKAUDIO_RDFT = 0x15000 + 48,
CODEC_ID_BINKAUDIO_DCT = 0x15000 + 49,
CODEC_ID_AAC_LATM = 0x15000 + 50,
CODEC_ID_QDMC = 0x15000 + 51,
CODEC_ID_CELT = 0x15000 + 52,
CODEC_ID_G723_1 = 0x15000 + 53,
CODEC_ID_G729 = 0x15000 + 54,
CODEC_ID_8SVX_EXP = 0x15000 + 55,
CODEC_ID_8SVX_FIB = 0x15000 + 56,
CODEC_ID_BMV_AUDIO = 0x15000 + 57,
CODEC_ID_RALF = 0x15000 + 58,
CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'),
CODEC_ID_8SVX_RAW = MKBETAG('8','S','V','X'),
CODEC_ID_SONIC = MKBETAG('S','O','N','C'),
CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'),
/* subtitle codecs */
CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
CODEC_ID_DVD_SUBTITLE = 0x17000,
CODEC_ID_DVB_SUBTITLE = 0x17000 + 1,
CODEC_ID_TEXT = 0x17000 + 2, ///< raw UTF-8 text
CODEC_ID_XSUB = 0x17000 + 3,
CODEC_ID_SSA = 0x17000 + 4,
CODEC_ID_MOV_TEXT = 0x17000 + 5,
CODEC_ID_HDMV_PGS_SUBTITLE = 0x17000 + 6,
CODEC_ID_DVB_TELETEXT = 0x17000 + 7,
CODEC_ID_SRT = 0x17000 + 8,
CODEC_ID_MICRODVD = MKBETAG('m','D','V','D'),
CODEC_ID_EIA_608 = MKBETAG('c','6','0','8'),
CODEC_ID_JACOSUB = MKBETAG('J','S','U','B'),
/* other specific kind of codecs (generally used for attachments) */
CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
CODEC_ID_TTF = 0x18000,
CODEC_ID_BINTEXT = MKBETAG('B','T','X','T'),
CODEC_ID_XBIN = MKBETAG('X','B','I','N'),
CODEC_ID_IDF = MKBETAG( 0 ,'I','D','F'),
CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like CODEC_ID_NONE) but lavf should attempt to identify it
CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
* stream (only used by libavformat) */
CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
* stream (only used by libavformat) */
CODEC_ID_FFMETADATA = 0x21000; ///< Dummy codec for streams containing only metadata information.
/**
 * @ingroup lavc_decoding
 * Required number of additionally allocated bytes at the end of the input bitstream for decoding.
 * This is mainly needed because some optimized bitstream readers read
 * 32 or 64 bit at once and could read over the end.<br>
 * Note: If the first 23 bits of the additional bytes are not 0, then damaged
 * MPEG bitstreams could cause overread and segfault.
 */
// Mirrors the #define of the same name in avcodec.h.
public static final int FF_INPUT_BUFFER_PADDING_SIZE = 16;
/**
 * @ingroup lavc_encoding
 * minimum encoding buffer size
 * Used to avoid some checks during header writing.
 */
// Mirrors the #define of the same name in avcodec.h.
public static final int FF_MIN_BUFFER_SIZE = 16384;
/**
 * @ingroup lavc_encoding
 * motion estimation type.
 */
public static final int // enum Motion_Est_ID {
// Note: numbering starts at 1 — ME_ZERO means "no search", not the value 0.
ME_ZERO = 1, ///< no search, that is use 0,0 vector whenever one is needed
ME_FULL = 2,
ME_LOG = 3,
ME_PHODS = 4,
ME_EPZS = 5, ///< enhanced predictive zonal search
ME_X1 = 6, ///< reserved for experiments
ME_HEX = 7, ///< hexagon based search
ME_UMH = 8, ///< uneven multi-hexagon search
ME_ITER = 9, ///< iterative search
ME_TESA = 10; ///< transformed exhaustive search algorithm
/**
 * @ingroup lavc_decoding
 */
public static final int // enum AVDiscard {
/* We leave some space between them for extensions (drop some
 * keyframes for intra-only or drop just some bidir frames). */
// Higher values discard more aggressively; gaps in the numbering are reserved.
AVDISCARD_NONE =-16, ///< discard nothing
AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi
AVDISCARD_NONREF = 8, ///< discard all non reference
AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames
AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes
AVDISCARD_ALL = 48; ///< discard all
public static final int // enum AVColorPrimaries {
// Note: no constant with value 3 is defined here, mirroring the native enum.
AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
AVCOL_PRI_UNSPECIFIED = 2,
AVCOL_PRI_BT470M = 4,
AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above
AVCOL_PRI_FILM = 8;
public static final int // enum AVColorTransferCharacteristic {
AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361
AVCOL_TRC_UNSPECIFIED = 2,
AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG
AVCOL_TRC_SMPTE240M = 7;
public static final int // enum AVColorSpace {
AVCOL_SPC_RGB = 0,
AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
AVCOL_SPC_UNSPECIFIED = 2,
AVCOL_SPC_FCC = 4,
AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
AVCOL_SPC_SMPTE240M = 7,
AVCOL_SPC_YCOCG = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
AVCOL_SPC_YCGCO = AVCOL_SPC_YCOCG; ///< alias for AVCOL_SPC_YCOCG
public static final int // enum AVColorRange {
AVCOL_RANGE_UNSPECIFIED = 0,
AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges
AVCOL_RANGE_JPEG = 2; ///< the normal 2^n-1 "JPEG" YUV ranges
/**
 * X X 3 4 X X are luma samples,
 * 1 2 1-6 are possible chroma positions
 * X X 5 6 X 0 is undefined/unknown position
 */
public static final int // enum AVChromaLocation {
AVCHROMA_LOC_UNSPECIFIED = 0,
AVCHROMA_LOC_LEFT = 1, ///< mpeg2/4, h264 default
AVCHROMA_LOC_CENTER = 2, ///< mpeg1, jpeg, h263
AVCHROMA_LOC_TOPLEFT = 3, ///< DV
AVCHROMA_LOC_TOP = 4,
AVCHROMA_LOC_BOTTOMLEFT = 5,
AVCHROMA_LOC_BOTTOM = 6;
public static final int // enum AVAudioServiceType {
AV_AUDIO_SERVICE_TYPE_MAIN = 0,
AV_AUDIO_SERVICE_TYPE_EFFECTS = 1,
AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2,
AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3,
AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4,
AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5,
AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6,
AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7,
AV_AUDIO_SERVICE_TYPE_KARAOKE = 8;
/**
 * @ingroup lavc_encoding
 *
 * Maps the native {@code RcOverride} struct: a per-frame-range rate-control
 * override. Each getter/setter pair below accesses one struct field.
 */
public static class RcOverride extends Pointer {
static { load(); }
// Allocates one native RcOverride struct.
public RcOverride() { allocate(); }
// Allocates a contiguous native array of 'size' structs; use position() to index.
public RcOverride(int size) { allocateArray(size); }
// Wraps an existing native pointer without allocating.
public RcOverride(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(int size);
@Override public RcOverride position(int position) {
return (RcOverride)super.position(position);
}
// First frame (inclusive) of the range this override applies to.
public native int start_frame(); public native RcOverride start_frame(int start_frame);
// Last frame of the range this override applies to.
public native int end_frame(); public native RcOverride end_frame(int end_frame);
public native int qscale(); public native RcOverride qscale(int qscale); // If this is 0 then quality_factor will be used instead.
public native float quality_factor(); public native RcOverride quality_factor(float quality_factor);
}
public static final int FF_MAX_B_FRAMES = 16;
/* encoding support
These flags can be passed in AVCodecContext.flags before initialization.
Note: Not everything is supported yet.
*/
// These mirror the CODEC_FLAG* / CODEC_FLAG2* #defines in avcodec.h.
public static final int
CODEC_FLAG_QSCALE = 0x0002, ///< Use fixed qscale.
CODEC_FLAG_4MV = 0x0004, ///< 4 MV per MB allowed / advanced prediction for H.263.
CODEC_FLAG_QPEL = 0x0010, ///< Use qpel MC.
CODEC_FLAG_GMC = 0x0020, ///< Use GMC.
CODEC_FLAG_MV0 = 0x0040; ///< Always try a MB with MV=<0,0>.
/**
 * The parent program guarantees that the input for B-frames containing
 * streams is not written to for at least s->max_b_frames+1 frames, if
 * this is not set the input will be copied.
 */
public static final int
CODEC_FLAG_INPUT_PRESERVED = 0x0100,
CODEC_FLAG_PASS1 = 0x0200, ///< Use internal 2pass ratecontrol in first pass mode.
CODEC_FLAG_PASS2 = 0x0400, ///< Use internal 2pass ratecontrol in second pass mode.
CODEC_FLAG_GRAY = 0x2000, ///< Only decode/encode grayscale.
CODEC_FLAG_EMU_EDGE = 0x4000, ///< Don't draw edges.
CODEC_FLAG_PSNR = 0x8000, ///< error[?] variables will be set during encoding.
CODEC_FLAG_TRUNCATED = 0x00010000, /** Input bitstream might be truncated at a random
location instead of only at frame boundaries. */
CODEC_FLAG_NORMALIZE_AQP = 0x00020000, ///< Normalize adaptive quantization.
CODEC_FLAG_INTERLACED_DCT = 0x00040000, ///< Use interlaced DCT.
CODEC_FLAG_LOW_DELAY = 0x00080000, ///< Force low delay.
CODEC_FLAG_GLOBAL_HEADER = 0x00400000, ///< Place global headers in extradata instead of every keyframe.
CODEC_FLAG_BITEXACT = 0x00800000, ///< Use only bitexact stuff (except (I)DCT).
/* Fx : Flag for h263+ extra options */
CODEC_FLAG_AC_PRED = 0x01000000, ///< H.263 advanced intra coding / MPEG-4 AC prediction
CODEC_FLAG_LOOP_FILTER = 0x00000800, ///< loop filter
CODEC_FLAG_INTERLACED_ME = 0x20000000, ///< interlaced motion estimation
CODEC_FLAG_CLOSED_GOP = 0x80000000, // note: sets the sign bit of a Java int
CODEC_FLAG2_FAST = 0x00000001, ///< Allow non spec compliant speedup tricks.
CODEC_FLAG2_NO_OUTPUT = 0x00000004, ///< Skip bitstream encoding.
CODEC_FLAG2_LOCAL_HEADER = 0x00000008, ///< Place global headers at every keyframe instead of in extradata.
CODEC_FLAG2_CHUNKS = 0x00008000, ///< Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries.
CODEC_FLAG2_SHOW_ALL = 0x00400000; ///< Show all frames before the first keyframe
/* Unsupported options :
 * Syntax Arithmetic coding (SAC)
 * Reference Picture Selection
 * Independent Segment Decoding */
/* /Fx */
/* codec capabilities */
public static final int CODEC_CAP_DRAW_HORIZ_BAND = 0x0001; ///< Decoder can use draw_horiz_band callback.
/**
 * Codec uses get_buffer() for allocating buffers and supports custom allocators.
 * If not set, it might not use get_buffer() at all or use operations that
 * assume the buffer was allocated by avcodec_default_get_buffer.
 */
public static final int
CODEC_CAP_DR1 = 0x0002,
CODEC_CAP_TRUNCATED = 0x0008,
/* Codec can export data for HW decoding (XvMC). */
CODEC_CAP_HWACCEL = 0x0010;
/**
 * Encoder or decoder requires flushing with NULL input at the end in order to
 * give the complete and correct output.
 *
 * NOTE: If this flag is not set, the codec is guaranteed to never be fed with
 * with NULL data. The user can still send NULL data to the public encode
 * or decode function, but libavcodec will not pass it along to the codec
 * unless this flag is set.
 *
 * Decoders:
 * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,
 * avpkt->size=0 at the end to get the delayed data until the decoder no longer
 * returns frames.
 *
 * Encoders:
 * The encoder needs to be fed with NULL data at the end of encoding until the
 * encoder no longer returns data.
 *
 * NOTE: For encoders implementing the AVCodec.encode2() function, setting this
 * flag also means that the encoder must set the pts and duration for
 * each output packet. If this flag is not set, the pts and duration will
 * be determined by libavcodec from the input frame.
 */
public static final int CODEC_CAP_DELAY = 0x0020;
/**
 * Codec can be fed a final frame with a smaller size.
 * This can be used to prevent truncation of the last audio samples.
 */
public static final int CODEC_CAP_SMALL_LAST_FRAME = 0x0040;
/**
 * Codec can export data for HW decoding (VDPAU).
 */
public static final int CODEC_CAP_HWACCEL_VDPAU = 0x0080;
/**
 * Codec can output multiple frames per AVPacket
 * Normally demuxers return one frame at a time, demuxers which do not do
 * are connected to a parser to split what they return into proper frames.
 * This flag is reserved to the very rare category of codecs which have a
 * bitstream that cannot be split into frames without timeconsuming
 * operations like full decoding. Demuxers carring such bitstreams thus
 * may return multiple frames in a packet. This has many disadvantages like
 * prohibiting stream copy in many cases thus it should only be considered
 * as a last resort.
 */
public static final int CODEC_CAP_SUBFRAMES = 0x0100;
/**
 * Codec is experimental and is thus avoided in favor of non experimental
 * encoders
 */
public static final int CODEC_CAP_EXPERIMENTAL = 0x0200;
/**
 * Codec should fill in channel configuration and samplerate instead of container
 */
public static final int CODEC_CAP_CHANNEL_CONF = 0x0400;
/**
 * Codec is able to deal with negative linesizes
 */
public static final int CODEC_CAP_NEG_LINESIZES = 0x0800;
/**
 * Codec supports frame-level multithreading.
 */
public static final int CODEC_CAP_FRAME_THREADS = 0x1000;
/**
 * Codec supports slice-based (or partition-based) multithreading.
 */
public static final int CODEC_CAP_SLICE_THREADS = 0x2000;
/**
 * Codec supports changed parameters at any point.
 */
public static final int CODEC_CAP_PARAM_CHANGE = 0x4000;
/**
 * Codec supports avctx->thread_count == 0 (auto).
 */
public static final int CODEC_CAP_AUTO_THREADS = 0x8000;
/**
 * Audio encoder supports receiving a different number of samples in each call.
 */
public static final int CODEC_CAP_VARIABLE_FRAME_SIZE = 0x10000;
/**
 * Codec is lossless.
 */
// Note: 0x80000000 sets the sign bit of a Java int.
public static final int CODEC_CAP_LOSSLESS = 0x80000000;
//The following defines may change, don't expect compatibility if you use them.
public static final int
MB_TYPE_INTRA4x4 = 0x0001,
MB_TYPE_INTRA16x16 = 0x0002, //FIXME H.264-specific
MB_TYPE_INTRA_PCM = 0x0004, //FIXME H.264-specific
MB_TYPE_16x16 = 0x0008,
MB_TYPE_16x8 = 0x0010,
MB_TYPE_8x16 = 0x0020,
MB_TYPE_8x8 = 0x0040,
MB_TYPE_INTERLACED = 0x0080,
MB_TYPE_DIRECT2 = 0x0100, //FIXME
MB_TYPE_ACPRED = 0x0200,
MB_TYPE_GMC = 0x0400,
MB_TYPE_SKIP = 0x0800,
MB_TYPE_P0L0 = 0x1000,
MB_TYPE_P1L0 = 0x2000,
MB_TYPE_P0L1 = 0x4000,
MB_TYPE_P1L1 = 0x8000,
MB_TYPE_L0 = (MB_TYPE_P0L0 | MB_TYPE_P1L0),
MB_TYPE_L1 = (MB_TYPE_P0L1 | MB_TYPE_P1L1),
MB_TYPE_L0L1 = (MB_TYPE_L0 | MB_TYPE_L1),
MB_TYPE_QUANT = 0x00010000,
MB_TYPE_CBP = 0x00020000;
//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...)
/**
 * Pan Scan area.
 * This specifies the area which should be displayed.
 * Note there may be multiple such areas for one frame.
 *
 * Maps the native {@code AVPanScan} struct; each getter/setter pair below
 * accesses one struct field.
 */
public static class AVPanScan extends Pointer {
static { load(); }
// Allocates one native AVPanScan struct.
public AVPanScan() { allocate(); }
// Allocates a contiguous native array of 'size' structs; use position() to index.
public AVPanScan(int size) { allocateArray(size); }
// Wraps an existing native pointer without allocating.
public AVPanScan(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(int size);
@Override public AVPanScan position(int position) {
return (AVPanScan)super.position(position);
}
/**
 * id
 * - encoding: Set by user.
 * - decoding: Set by libavcodec.
 */
public native int id(); public native AVPanScan id(int id);
/**
 * width and height in 1/16 pel
 * - encoding: Set by user.
 * - decoding: Set by libavcodec.
 */
public native int width(); public native AVPanScan width(int width);
public native int height(); public native AVPanScan height(int height);
/**
 * position of the top left corner in 1/16 pel for up to 3 fields/frames
 * - encoding: Set by user.
 * - decoding: Set by libavcodec.
 */
// int16_t position[3][2];
// Indexed accessor for the native 3x2 int16_t array (i = field/frame, j = x/y).
public native short position(int i, int j); public native AVPanScan position(int i, int j, short position);
}
// Mirrors the FF_QSCALE_TYPE_*, FF_BUFFER_TYPE_* and FF_BUFFER_HINTS_*
// #defines in avcodec.h.
public static final int
FF_QSCALE_TYPE_MPEG1 = 0,
FF_QSCALE_TYPE_MPEG2 = 1,
FF_QSCALE_TYPE_H264 = 2,
FF_QSCALE_TYPE_VP56 = 3,
FF_BUFFER_TYPE_INTERNAL = 1,
FF_BUFFER_TYPE_USER = 2, ///< direct rendering buffers (image is (de)allocated by user)
FF_BUFFER_TYPE_SHARED = 4, ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared.
FF_BUFFER_TYPE_COPY = 8, ///< Just a (modified) copy of some other buffer, don't deallocate anything.
FF_BUFFER_HINTS_VALID = 0x01, // Buffer hints value is meaningful (if 0 ignore).
FF_BUFFER_HINTS_READABLE = 0x02, // Codec will read from buffer.
FF_BUFFER_HINTS_PRESERVE = 0x04, // User must not alter buffer content.
FF_BUFFER_HINTS_REUSABLE = 0x08; // Codec will reuse the buffer (update).
/**
 * @defgroup lavc_packet AVPacket
 *
 * Types and functions for working with AVPacket.
 * @{
 */
public static final int // enum AVPacketSideDataType {
AV_PKT_DATA_PALETTE = 0,
AV_PKT_DATA_NEW_EXTRADATA = 1,
/**
 * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
 * @code
 * u32le param_flags
 * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
 * s32le channel_count
 * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
 * u64le channel_layout
 * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
 * s32le sample_rate
 * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
 * s32le width
 * s32le height
 * @endcode
 */
AV_PKT_DATA_PARAM_CHANGE = 2,
/**
 * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of
 * structures with info about macroblocks relevant to splitting the
 * packet into smaller packets on macroblock edges (e.g. as for RFC 2190).
 * That is, it does not necessarily contain info about all macroblocks,
 * as long as the distance between macroblocks in the info is smaller
 * than the target payload size.
 * Each MB info structure is 12 bytes, and is laid out as follows:
 * @code
 * u32le bit offset from the start of the packet
 * u8 current quantizer at the start of the macroblock
 * u8 GOB number
 * u16le macroblock address within the GOB
 * u8 horizontal MV predictor
 * u8 vertical MV predictor
 * u8 horizontal MV predictor for block number 3
 * u8 vertical MV predictor for block number 3
 * @endcode
 */
AV_PKT_DATA_H263_MB_INFO = 3;
// Java peer of FFmpeg's AVPacket struct (avcodec.h). Accessor names and
// annotations are mapped by JavaCPP onto the native struct fields, so they
// must not be renamed or reordered relative to the C declaration.
public static class AVPacket extends Pointer {
static { load(); }
/** Allocates a new native AVPacket. */
public AVPacket() { allocate(); }
/** Allocates a contiguous native array of {@code size} AVPacket structs. */
public AVPacket(int size) { allocateArray(size); }
/** Wraps an existing native pointer without allocating. */
public AVPacket(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(int size);
/** Repositions this pointer within a native AVPacket array. */
@Override public AVPacket position(int position) {
return (AVPacket)super.position(position);
}
/**
 * Presentation timestamp in AVStream->time_base units; the time at which
 * the decompressed packet will be presented to the user.
 * Can be AV_NOPTS_VALUE if it is not stored in the file.
 * pts MUST be larger or equal to dts as presentation cannot happen before
 * decompression, unless one wants to view hex dumps. Some formats misuse
 * the terms dts and pts/cts to mean something different. Such timestamps
 * must be converted to true pts/dts before they are stored in AVPacket.
 */
public native long pts(); public native AVPacket pts(long pts);
/**
 * Decompression timestamp in AVStream->time_base units; the time at which
 * the packet is decompressed.
 * Can be AV_NOPTS_VALUE if it is not stored in the file.
 */
public native long dts(); public native AVPacket dts(long dts);
/** Packet payload buffer (native {@code uint8_t*}). */
@Cast("uint8_t*")
public native BytePointer data(); public native AVPacket data(BytePointer data);
/** Size of the payload in bytes. */
public native int size(); public native AVPacket size(int size);
/** Index of the stream this packet belongs to. */
public native int stream_index(); public native AVPacket stream_index(int stream_index);
/**
 * A combination of AV_PKT_FLAG values
 */
public native int flags(); public native AVPacket flags(int flags);
/**
 * Additional packet data that can be provided by the container.
 * Packet can contain several types of side information.
 * These accessors index into the native side_data array; the valid range
 * of {@code i} is [0, side_data_elems()).
 */
@Name({"side_data", ".data"}) @Cast("uint8_t*")
public native BytePointer side_data_data(int i); public native AVPacket side_data_data(int i, BytePointer data);
@Name({"side_data", ".size"})
public native int side_data_size(int i); public native AVPacket side_data_size(int i, int size);
@Name({"side_data", ".type"}) @Cast("AVPacketSideDataType")
public native int side_data_type(int i); public native AVPacket side_data_type(int i, int type);
/** Number of entries in the side_data array. */
public native int side_data_elems(); public native AVPacket side_data_elems(int side_data_elems);
/**
 * Duration of this packet in AVStream->time_base units, 0 if unknown.
 * Equals next_pts - this_pts in presentation order.
 */
public native int duration(); public native AVPacket duration(int duration);
/** Function-pointer type for the packet's custom deallocation callback. */
public static class Destruct extends FunctionPointer {
static { load(); }
public native void call(AVPacket p);
}
/** Custom destructor invoked to release this packet's resources (may be null). */
public native Destruct destruct(); public native AVPacket destruct(Destruct destruct);
// NOTE(review): priv is presumably library-internal state — nothing in this
// file reads it; treat as opaque.
public native Pointer priv(); public native AVPacket priv(Pointer priv);
public native long pos(); public native AVPacket pos(long pos); ///< byte position in stream, -1 if unknown
/**
 * Time difference in AVStream->time_base units from the pts of this
 * packet to the point at which the output from the decoder has converged
 * independent from the availability of previous frames. That is, the
 * frames are virtually identical no matter if decoding started from
 * the very first frame or from this keyframe.
 * Is AV_NOPTS_VALUE if unknown.
 * This field is not the display duration of the current packet.
 * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY
 * set.
 *
 * The purpose of this field is to allow seeking in streams that have no
 * keyframes in the conventional sense. It corresponds to the
 * recovery point SEI in H.264 and match_time_delta in NUT. It is also
 * essential for some types of subtitle streams to ensure that all
 * subtitles are correctly displayed after seeking.
 */
public native long convergence_duration(); public native AVPacket convergence_duration(long convergence_duration);
}
// Bit flags stored in AVPacket.flags().
public static final int
AV_PKT_FLAG_KEY = 0x0001, ///< The packet contains a keyframe
AV_PKT_FLAG_CORRUPT = 0x0002; ///< The packet content is corrupted
// Mirrors enum AVSideDataParamChangeFlags from avcodec.h; values are bit
// flags and may be combined.
public static final int // enum AVSideDataParamChangeFlags {
AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001,
AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002,
AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004,
AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008;
/**
* @}
*/
/**
* Audio Video Frame.
* New fields can be added to the end of AVFrame with minor version
* bumps. Similarly fields that are marked as to be only accessed by
* av_opt_ptr() can be reordered. This allows 2 forks to add fields
* without breaking compatibility with each other.
* Removal, reordering and changes in the remaining cases require
* a major version bump.
* sizeof(AVFrame) must not be used outside libavcodec.
*/
// Java peer of FFmpeg's AVFrame struct (avcodec.h). Accessor names and
// annotations are mapped by JavaCPP onto the native struct fields.
public static class AVFrame extends AVPicture {
static { load(); }
/** Allocates a new native AVFrame. */
public AVFrame() { allocate(); }
/** Allocates a contiguous native array of {@code size} AVFrame structs. */
public AVFrame(int size) { allocateArray(size); }
/** Wraps an existing native pointer without allocating. */
public AVFrame(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(int size);
/** Repositions this pointer within a native AVFrame array. */
@Override public AVFrame position(int position) {
return (AVFrame)super.position(position);
}
public static final int AV_NUM_DATA_POINTERS = 8;
/**
 * pointer to the picture/channel planes.
 * This might be different from the first allocated byte
 * - encoding: Set by user
 * - decoding: set by AVCodecContext.get_buffer()
 */
// @Cast("uint8_t*") // uint8_t *data[AV_NUM_DATA_POINTERS];
// public native BytePointer data(int i); public native AVFrame data(int i, BytePointer data);
/**
 * Size, in bytes, of the data for each picture/channel plane.
 *
 * For audio, only linesize[0] may be set. For planar audio, each channel
 * plane must be the same size.
 *
 * - encoding: Set by user
 * - decoding: set by AVCodecContext.get_buffer()
 */
// int linesize[AV_NUM_DATA_POINTERS];
// public native int linesize(int i); public native AVFrame linesize(int i, int linesize);
/**
 * pointers to the data planes/channels.
 *
 * For video, this should simply point to data[].
 *
 * For planar audio, each channel has a separate data pointer, and
 * linesize[0] contains the size of each channel buffer.
 * For packed audio, there is just one data pointer, and linesize[0]
 * contains the total size of the buffer for all channels.
 *
 * Note: Both data and extended_data will always be set by get_buffer(),
 * but for planar audio with more channels that can fit in data,
 * extended_data must be used by the decoder in order to access all
 * channels.
 *
 * encoding: unused
 * decoding: set by AVCodecContext.get_buffer()
 */
@Cast("uint8_t**")
public native PointerPointer extended_data(); public native AVFrame extended_data(PointerPointer extended_data);
/**
 * width and height of the video frame
 * - encoding: unused
 * - decoding: Read by user.
 */
public native int width(); public native AVFrame width(int width);
public native int height(); public native AVFrame height(int height);
/**
 * number of audio samples (per channel) described by this frame
 * - encoding: Set by user
 * - decoding: Set by libavcodec
 */
public native int nb_samples(); public native AVFrame nb_samples(int nb_samples);
/**
 * format of the frame, -1 if unknown or unset
 * Values correspond to enum PixelFormat for video frames,
 * enum AVSampleFormat for audio)
 * - encoding: unused
 * - decoding: Read by user.
 */
public native int format(); public native AVFrame format(int format);
/**
 * 1 -> keyframe, 0-> not
 * - encoding: Set by libavcodec.
 * - decoding: Set by libavcodec.
 */
public native int key_frame(); public native AVFrame key_frame(int key_frame);
/**
 * Picture type of the frame, see ?_TYPE below.
 * - encoding: Set by libavcodec. for coded_picture (and set by user for input).
 * - decoding: Set by libavcodec.
 */
@Cast("AVPictureType")
public native int pict_type(); public native AVFrame pict_type(int pict_type);
/**
 * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer.
 * This isn't used by libavcodec unless the default get/release_buffer() is used.
 * - encoding:
 * - decoding:
 */
@Cast("uint8_t*") // uint8_t *base[AV_NUM_DATA_POINTERS];
public native BytePointer base(int i); public native AVFrame base(int i, BytePointer base);
/**
 * sample aspect ratio for the video frame, 0/1 if unknown/unspecified
 * - encoding: unused
 * - decoding: Read by user.
 */
@ByRef
public native AVRational sample_aspect_ratio(); public native AVFrame sample_aspect_ratio(AVRational sample_aspect_ratio);
/**
 * presentation timestamp in time_base units (time when frame should be shown to user)
 * If AV_NOPTS_VALUE then frame_rate = 1/time_base will be assumed.
 * - encoding: MUST be set by user.
 * - decoding: Set by libavcodec.
 */
public native long pts(); public native AVFrame pts(long pts);
/**
 * reordered pts from the last AVPacket that has been input into the decoder
 * - encoding: unused
 * - decoding: Read by user.
 */
public native long pkt_pts(); public native AVFrame pkt_pts(long pkt_pts);
/**
 * dts from the last AVPacket that has been input into the decoder
 * - encoding: unused
 * - decoding: Read by user.
 */
public native long pkt_dts(); public native AVFrame pkt_dts(long pkt_dts);
/**
 * picture number in bitstream order
 * - encoding: set by
 * - decoding: Set by libavcodec.
 */
public native int coded_picture_number(); public native AVFrame coded_picture_number(int coded_picture_number);
/**
 * picture number in display order
 * - encoding: set by
 * - decoding: Set by libavcodec.
 */
public native int display_picture_number(); public native AVFrame display_picture_number(int display_picture_number);
/**
 * quality (between 1 (good) and FF_LAMBDA_MAX (bad))
 * - encoding: Set by libavcodec. for coded_picture (and set by user for input).
 * - decoding: Set by libavcodec.
 */
public native int quality(); public native AVFrame quality(int quality);
/**
 * is this picture used as reference
 * The values for this are the same as the MpegEncContext.picture_structure
 * variable, that is 1->top field, 2->bottom field, 3->frame/both fields.
 * Set to 4 for delayed, non-reference frames.
 * - encoding: unused
 * - decoding: Set by libavcodec. (before get_buffer() call)).
 */
public native int reference(); public native AVFrame reference(int reference);
/**
 * QP table
 * - encoding: unused
 * - decoding: Set by libavcodec.
 */
@Cast("int8_t*")
public native BytePointer qscale_table(); public native AVFrame qscale_table(BytePointer qscale_table);
/**
 * QP store stride
 * - encoding: unused
 * - decoding: Set by libavcodec.
 */
public native int qstride(); public native AVFrame qstride(int qstride);
/**
 *
 */
public native int qscale_type(); public native AVFrame qscale_type(int qscale_type);
/**
 * mbskip_table[mb]>=1 if MB didn't change
 * stride= mb_width = (width+15)>>4
 * - encoding: unused
 * - decoding: Set by libavcodec.
 */
@Cast("uint8_t*")
public native BytePointer mbskip_table(); public native AVFrame mbskip_table(BytePointer mbskip_table);
/**
 * motion vector table
 * @code
 * example:
 * int mv_sample_log2= 4 - motion_subsample_log2;
 * int mb_width= (width+15)>>4;
 * int mv_stride= (mb_width << mv_sample_log2) + 1;
 * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];
 * @endcode
 * - encoding: Set by user.
 * - decoding: Set by libavcodec.
 */
@Cast("int16_t (*)[2]") // int16_t (*motion_val[2])[2];
public native PointerPointer motion_val(int i); public native AVFrame motion_val(int i, PointerPointer motion_val);
public native short motion_val(int i, int j, int k); public native AVFrame motion_val(int i, int j, int k, short motion_val);
/**
 * macroblock type table
 * mb_type_base + mb_width + 2
 * - encoding: Set by user.
 * - decoding: Set by libavcodec.
 */
@Cast("uint32_t*")
public native IntPointer mb_type(); public native AVFrame mb_type(IntPointer mb_type);
/**
 * DCT coefficients
 * - encoding: unused
 * - decoding: Set by libavcodec.
 */
public native ShortPointer dct_coeff(); public native AVFrame dct_coeff(ShortPointer dct_coeff);
/**
 * motion reference frame index
 * the order in which these are stored can depend on the codec.
 * - encoding: Set by user.
 * - decoding: Set by libavcodec.
 */
@Cast("int8_t*") // int8_t *ref_index[2];
public native BytePointer ref_index(int i); public native AVFrame ref_index(int i, BytePointer ref_index);
/**
 * for some private data of the user
 * - encoding: unused
 * - decoding: Set by user.
 */
public native Pointer opaque(); public native AVFrame opaque(Pointer opaque);
/**
 * error
 * - encoding: Set by libavcodec. if flags&CODEC_FLAG_PSNR.
 * - decoding: unused
 */
// FIX(review): the element type is uint64_t, which maps to Java long (see
// channel_layout below); the array subscript is a plain int like every other
// indexed accessor in this class. The previous declaration had the types
// swapped ("int error(long i)"), truncating the 64-bit error value.
@Cast("uint64_t") // uint64_t error[AV_NUM_DATA_POINTERS];
public native long error(int i); public native AVFrame error(int i, long error);
/**
 * type of the buffer (to keep track of who has to deallocate data[*])
 * - encoding: Set by the one who allocates it.
 * - decoding: Set by the one who allocates it.
 * Note: User allocated (direct rendering) & internal buffers cannot coexist currently.
 */
public native int type(); public native AVFrame type(int type);
/**
 * When decoding, this signals how much the picture must be delayed.
 * extra_delay = repeat_pict / (2*fps)
 * - encoding: unused
 * - decoding: Set by libavcodec.
 */
public native int repeat_pict(); public native AVFrame repeat_pict(int repeat_pict);
/**
 * The content of the picture is interlaced.
 * - encoding: Set by user.
 * - decoding: Set by libavcodec. (default 0)
 */
public native int interlaced_frame(); public native AVFrame interlaced_frame(int interlaced_frame);
/**
 * If the content is interlaced, is top field displayed first.
 * - encoding: Set by user.
 * - decoding: Set by libavcodec.
 */
public native int top_field_first(); public native AVFrame top_field_first(int top_field_first);
/**
 * Tell user application that palette has changed from previous frame.
 * - encoding: ??? (no palette-enabled encoder yet)
 * - decoding: Set by libavcodec. (default 0).
 */
public native int palette_has_changed(); public native AVFrame palette_has_changed(int palette_has_changed);
/**
 * codec suggestion on buffer type if != 0
 * - encoding: unused
 * - decoding: Set by libavcodec. (before get_buffer() call)).
 */
public native int buffer_hints(); public native AVFrame buffer_hints(int buffer_hints);
/**
 * Pan scan.
 * - encoding: Set by user.
 * - decoding: Set by libavcodec.
 */
public native AVPanScan pan_scan(); public native AVFrame pan_scan(AVPanScan pan_scan);
/**
 * hardware accelerator private data (FFmpeg-allocated)
 * - encoding: unused
 * - decoding: Set by libavcodec
 */
public native Pointer hwaccel_picture_private(); public native AVFrame hwaccel_picture_private(Pointer hwaccel_picture_private);
/**
 * the AVCodecContext which ff_thread_get_buffer() was last called on
 * - encoding: Set by libavcodec.
 * - decoding: Set by libavcodec.
 */
public native AVCodecContext owner(); public native AVFrame owner(AVCodecContext owner);
/**
 * used by multithreading to store frame-specific info
 * - encoding: Set by libavcodec.
 * - decoding: Set by libavcodec.
 */
public native Pointer thread_opaque(); public native AVFrame thread_opaque(Pointer thread_opaque);
/**
 * log2 of the size of the block which a single vector in motion_val represents:
 * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)
 * - encoding: unused
 * - decoding: Set by libavcodec.
 */
@Cast("uint8_t")
public native int motion_subsample_log2(); public native AVFrame motion_subsample_log2(int motion_subsample_log2);
/**
 * Sample rate of the audio data.
 *
 * - encoding: unused
 * - decoding: read by user
 */
public native int sample_rate(); public native AVFrame sample_rate(int sample_rate);
/**
 * Channel layout of the audio data.
 *
 * - encoding: unused
 * - decoding: read by user.
 */
@Cast("uint64_t")
public native long channel_layout(); public native AVFrame channel_layout(long channel_layout);
/**
 * frame timestamp estimated using various heuristics, in stream time base
 * Code outside libavcodec should access this field using:
 * av_frame_get_best_effort_timestamp(frame)
 * - encoding: unused
 * - decoding: set by libavcodec, read by user.
 */
public native long best_effort_timestamp(); public native AVFrame best_effort_timestamp(long best_effort_timestamp);
/**
 * reordered pos from the last AVPacket that has been input into the decoder
 * Code outside libavcodec should access this field using:
 * av_frame_get_pkt_pos(frame)
 * - encoding: unused
 * - decoding: Read by user.
 */
public native long pkt_pos(); public native AVFrame pkt_pos(long pkt_pos);
}
/**
 * Accessors for some AVFrame fields.
 * The position of these fields in the structure is not part of the ABI,
 * they should not be accessed directly outside libavcodec.
 */
// Getters read, setters write, the corresponding AVFrame field via libavcodec.
public static native long av_frame_get_best_effort_timestamp(AVFrame frame);
public static native long av_frame_get_pkt_pos (AVFrame frame);
public static native long av_frame_get_channel_layout (AVFrame frame);
public static native int av_frame_get_sample_rate (AVFrame frame);
public static native void av_frame_set_best_effort_timestamp(AVFrame frame, long val);
public static native void av_frame_set_pkt_pos (AVFrame frame, long val);
public static native void av_frame_set_channel_layout (AVFrame frame, long val);
public static native void av_frame_set_sample_rate (AVFrame frame, int val);
// Opaque handle to libavcodec's private per-context state: no fields are
// exposed, so instances can only be passed around, never inspected.
@Opaque public static class AVCodecInternal extends Pointer {
/** Creates a null pointer (useful only as a target for native output). */
public AVCodecInternal() { }
/** Wraps an existing native pointer. */
public AVCodecInternal(Pointer p) { super(p); }
}
// Mirrors enum AVFieldOrder from avcodec.h: interlaced field order of a stream.
public static final int // enum AVFieldOrder
AV_FIELD_UNKNOWN = 0,
AV_FIELD_PROGRESSIVE = 1,
AV_FIELD_TT = 2, ///< Top coded_first, top displayed first
AV_FIELD_BB = 3, ///< Bottom coded first, bottom displayed first
AV_FIELD_TB = 4, ///< Top coded first, bottom displayed first
AV_FIELD_BT = 5; ///< Bottom coded first, top displayed first
/**
* main external API structure.
* New fields can be added to the end with minor version bumps.
* Removal, reordering and changes to existing fields require a major
* version bump.
* Please use AVOptions (av_opt* / av_set/get*()) to access these fields from user
* applications.
* sizeof(AVCodecContext) must not be used outside libav*.
*/
public static class AVCodecContext extends Pointer {
static { load(); }
public AVCodecContext() { allocate(); }
public AVCodecContext(int size) { allocateArray(size); }
public AVCodecContext(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(int size);
@Override public AVCodecContext position(int position) {
return (AVCodecContext)super.position(position);
}
/**
* information on struct for av_log
* - set by avcodec_alloc_context3
*/
@Const
public native AVClass av_class(); public native AVCodecContext av_class(AVClass av_class);
public native int log_level_offset(); public native AVCodecContext log_level_offset(int log_level_offset);
@Cast("AVMediaType") /* see AVMEDIA_TYPE_xxx */
public native int codec_type(); public native AVCodecContext codec_type(int codec_type);
public native AVCodec codec(); public native AVCodecContext codec(AVCodec codec);
public native String codec_name(); public native AVCodecContext codec_name(String codec_name);
@Cast("CodecID") /* see CODEC_ID_xxx */
public native int codec_id(); public native AVCodecContext codec_id(int codec_id);
/**
* fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
* This is used to work around some encoder bugs.
* A demuxer should set this to what is stored in the field used to identify the codec.
* If there are multiple such fields in a container then the demuxer should choose the one
* which maximizes the information about the used codec.
* If the codec tag field in a container is larger than 32 bits then the demuxer should
* remap the longer ID to 32 bits with a table or other structure. Alternatively a new
* extra_codec_tag + size could be added but for this a clear advantage must be demonstrated
* first.
* - encoding: Set by user, if not then the default based on codec_id will be used.
* - decoding: Set by user, will be converted to uppercase by libavcodec during init.
*/
public native int codec_tag(); public native AVCodecContext codec_tag(int codec_tag);
/**
* fourcc from the AVI stream header (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
* This is used to work around some encoder bugs.
* - encoding: unused
* - decoding: Set by user, will be converted to uppercase by libavcodec during init.
*/
@Cast("unsigned int")
public native int stream_codec_tag(); public native AVCodecContext stream_codec_tag(int stream_codec_tag);
public native Pointer priv_data(); public native AVCodecContext priv_data(Pointer priv_data);
/**
* Private context used for internal data.
*
* Unlike priv_data, this is not codec-specific. It is used in general
* libavcodec functions.
*/
public native AVCodecInternal internal(); public native AVCodecContext internal(AVCodecInternal internal);
/**
* Private data of the user, can be used to carry app specific stuff.
* - encoding: Set by user.
* - decoding: Set by user.
*/
public native Pointer opaque(); public native AVCodecContext opaque(Pointer opaque);
/**
* the average bitrate
* - encoding: Set by user; unused for constant quantizer encoding.
* - decoding: Set by libavcodec. 0 or some bitrate if this info is available in the stream.
*/
public native int bit_rate(); public native AVCodecContext bit_rate(int bit_rate);
/**
* number of bits the bitstream is allowed to diverge from the reference.
* the reference can be CBR (for CBR pass1) or VBR (for pass2)
* - encoding: Set by user; unused for constant quantizer encoding.
* - decoding: unused
*/
public native int bit_rate_tolerance(); public native AVCodecContext bit_rate_tolerance(int bit_rate_tolerance);
/**
* Global quality for codecs which cannot change it per frame.
* This should be proportional to MPEG-1/2/4 qscale.
* - encoding: Set by user.
* - decoding: unused
*/
public native int global_quality(); public native AVCodecContext global_quality(int global_quality);
/**
* - encoding: Set by user.
* - decoding: unused
*/
public native int compression_level(); public native AVCodecContext compression_level(int compression_level);
public static final int FF_COMPRESSION_DEFAULT = -1;
/**
* CODEC_FLAG_*.
* - encoding: Set by user.
* - decoding: Set by user.
*/
public native int flags(); public native AVCodecContext flags(int flags);
/**
* CODEC_FLAG2_*
* - encoding: Set by user.
* - decoding: Set by user.
*/
public native int flags2(); public native AVCodecContext flags2(int flags2);
/**
* some codecs need / can use extradata like Huffman tables.
* mjpeg: Huffman tables
* rv10: additional flags
* mpeg4: global headers (they can be in the bitstream or here)
* The allocated memory should be FF_INPUT_BUFFER_PADDING_SIZE bytes larger
* than extradata_size to avoid problems if it is read with the bitstream reader.
* The bytewise contents of extradata must not depend on the architecture or CPU endianness.
* - encoding: Set/allocated/freed by libavcodec.
* - decoding: Set/allocated/freed by user.
*/
@Cast("uint8_t*")
public native BytePointer extradata(); public native AVCodecContext extradata(BytePointer extradata);
public native int extradata_size(); public native AVCodecContext extradata_size(int extradata_size);
/**
* This is the fundamental unit of time (in seconds) in terms
* of which frame timestamps are represented. For fixed-fps content,
* timebase should be 1/framerate and timestamp increments should be
* identically 1.
* - encoding: MUST be set by user.
* - decoding: Set by libavcodec.
*/
@ByRef
public native AVRational time_base(); public native AVCodecContext time_base(AVRational time_base);
/**
* For some codecs, the time base is closer to the field rate than the frame rate.
* Most notably, H.264 and MPEG-2 specify time_base as half of frame duration
* if no telecine is used ...
*
* Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2.
*/
public native int ticks_per_frame(); public native AVCodecContext ticks_per_frame(int ticks_per_frame);
/**
* Encoding: Number of frames delay there will be from the encoder input to
* the decoder output. (we assume the decoder matches the spec)
* Decoding: Number of frames delay in addition to what a standard decoder
* as specified in the spec would produce.
*
* Video:
* Number of frames the decoded output will be delayed relative to the
* encoded input.
*
* Audio:
* Number of "priming" samples added to the beginning of the stream
* during encoding. The decoded output will be delayed by this many
* samples relative to the input to the encoder. Note that this field is
* purely informational and does not directly affect the pts output by
* the encoder, which should always be based on the actual presentation
* time, including any delay.
*
* - encoding: Set by libavcodec.
* - decoding: Set by libavcodec.
*/
public native int delay(); public native AVCodecContext delay(int delay);
/* video only */
/**
* picture width / height.
* - encoding: MUST be set by user.
* - decoding: Set by libavcodec.
* Note: For compatibility it is possible to set this instead of
* coded_width/height before decoding.
*/
public native int width(); public native AVCodecContext width(int width);
public native int height(); public native AVCodecContext height(int height);
/**
* Bitstream width / height, may be different from width/height if lowres enabled.
* - encoding: unused
* - decoding: Set by user before init if known. Codec should override / dynamically change if needed.
*/
public native int coded_width(); public native AVCodecContext coded_width(int coded_width);
public native int coded_height(); public native AVCodecContext coded_height(int coded_height);
public static final int FF_ASPECT_EXTENDED = 15;
/**
* the number of pictures in a group of pictures, or 0 for intra_only
* - encoding: Set by user.
* - decoding: unused
*/
public native int gop_size(); public native AVCodecContext gop_size(int gop_size);
/**
* Pixel format, see PIX_FMT_xxx.
* May be set by the demuxer if known from headers.
* May be overridden by the decoder if it knows better.
* - encoding: Set by user.
* - decoding: Set by user if known, overridden by libavcodec if known
*/
@Cast("PixelFormat")
public native int pix_fmt(); public native AVCodecContext pix_fmt(int pix_fmt);
/**
* Motion estimation algorithm used for video coding.
* 1 (zero), 2 (full), 3 (log), 4 (phods), 5 (epzs), 6 (x1), 7 (hex),
* 8 (umh), 9 (iter), 10 (tesa) [7, 8, 10 are x264 specific, 9 is snow specific]
* - encoding: MUST be set by user.
* - decoding: unused
*/
public native int me_method(); public native AVCodecContext me_method(int me_method);
/**
* If non NULL, 'draw_horiz_band' is called by the libavcodec
* decoder to draw a horizontal band. It improves cache usage. Not
* all codecs can do that. You must check the codec capabilities
* beforehand.
* When multithreading is used, it may be called from multiple threads
* at the same time; threads might draw different parts of the same AVFrame,
* or multiple AVFrames, and there is no guarantee that slices will be drawn
* in order.
* The function is also used by hardware acceleration APIs.
* It is called at least once during frame decoding to pass
* the data needed for hardware render.
* In that mode instead of pixel data, AVFrame points to
* a structure specific to the acceleration API. The application
* reads the structure and can change some fields to indicate progress
* or mark state.
* - encoding: unused
* - decoding: Set by user.
* @param height the height of the slice
* @param y the y position of the slice
* @param type 1->top field, 2->bottom field, 3->frame
* @param offset offset into the AVFrame.data from which the slice should be read
*/
// Function-pointer type for AVCodecContext.draw_horiz_band(): called by the
// decoder to hand the application one horizontal band of a decoded frame.
public static class Draw_horiz_band extends FunctionPointer {
static { load(); }
/** Wraps an existing native function pointer. */
public Draw_horiz_band(Pointer p) { super(p); }
/** Allocates a new callback; subclass and override call() to implement it. */
protected Draw_horiz_band() { allocate(); }
protected final native void allocate();
public native void call(AVCodecContext s, @Const AVFrame src,
IntPointer offset/*[AV_NUM_DATA_POINTERS]*/, int y, int type, int height);
}
public native Draw_horiz_band draw_horiz_band(); public native AVCodecContext draw_horiz_band(Draw_horiz_band draw_horiz_band);
/**
* callback to negotiate the pixelFormat
* @param fmt is the list of formats which are supported by the codec,
* it is terminated by -1 as 0 is a valid format, the formats are ordered by quality.
* The first is always the native one.
* @return the chosen format
* - encoding: unused
* - decoding: Set by user, if not set the native format will be chosen.
*/
// Function-pointer type for AVCodecContext.get_format(): picks a pixel format
// from the codec-supplied, -1-terminated list.
public static class Get_format extends FunctionPointer {
static { load(); }
/** Wraps an existing native function pointer. */
public Get_format(Pointer p) { super(p); }
/** Allocates a new callback; subclass and override call() to implement it. */
protected Get_format() { allocate(); }
protected final native void allocate();
public native @Cast("PixelFormat") int call(AVCodecContext s, @Cast("const PixelFormat*") IntPointer fmt);
}
public native Get_format get_format(); public native AVCodecContext get_format(Get_format get_format);
/**
* maximum number of B-frames between non-B-frames
* Note: The output will be delayed by max_b_frames+1 relative to the input.
* - encoding: Set by user.
* - decoding: unused
*/
public native int max_b_frames(); public native AVCodecContext max_b_frames(int max_b_frames);
/**
* qscale factor between IP and B-frames
* If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset).
* If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
* - encoding: Set by user.
* - decoding: unused
*/
public native float b_quant_factor(); public native AVCodecContext b_quant_factor(float b_quant_factor);
/** obsolete FIXME remove */
public native int rc_strategy(); public native AVCodecContext rc_strategy(int rc_strategy);
public static final int FF_RC_STRATEGY_XVID = 1;
public native int b_frame_strategy(); public native AVCodecContext b_frame_strategy(int b_frame_strategy);
/**
* qscale offset between IP and B-frames
* - encoding: Set by user.
* - decoding: unused
*/
public native float b_quant_offset(); public native AVCodecContext b_quant_offset(float b_quant_offset);
/**
* Size of the frame reordering buffer in the decoder.
* For MPEG-2 it is 1 IPB or 0 low delay IP.
* - encoding: Set by libavcodec.
* - decoding: Set by libavcodec.
*/
public native int has_b_frames(); public native AVCodecContext has_b_frames(int has_b_frames);
/**
* 0-> h263 quant 1-> mpeg quant
* - encoding: Set by user.
* - decoding: unused
*/
public native int mpeg_quant(); public native AVCodecContext mpeg_quant(int mpeg_quant);
/**
* qscale factor between P and I-frames
* If > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset).
* If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
* - encoding: Set by user.
* - decoding: unused
*/
public native float i_quant_factor(); public native AVCodecContext i_quant_factor(float i_quant_factor);
/**
* qscale offset between P and I-frames
* - encoding: Set by user.
* - decoding: unused
*/
public native float i_quant_offset(); public native AVCodecContext i_quant_offset(float i_quant_offset);
/**
* luminance masking (0-> disabled)
* - encoding: Set by user.
* - decoding: unused
*/
public native float lumi_masking(); public native AVCodecContext lumi_masking(float lumi_masking);
/**
* temporary complexity masking (0-> disabled)
* - encoding: Set by user.
* - decoding: unused
*/
public native float temporal_cplx_masking(); public native AVCodecContext temporal_cplx_masking(float temporal_cplx_masking);
/**
* spatial complexity masking (0-> disabled)
* - encoding: Set by user.
* - decoding: unused
*/
public native float spatial_cplx_masking(); public native AVCodecContext spatial_cplx_masking(float spatial_cplx_masking);
/**
* p block masking (0-> disabled)
* - encoding: Set by user.
* - decoding: unused
*/
public native float p_masking(); public native AVCodecContext p_masking(float p_masking);
/**
* darkness masking (0-> disabled)
* - encoding: Set by user.
* - decoding: unused
*/
public native float dark_masking(); public native AVCodecContext dark_masking(float dark_masking);
/**
* slice count
* - encoding: Set by libavcodec.
* - decoding: Set by user (or 0).
*/
public native int slice_count(); public native AVCodecContext slice_count(int slice_count);
/**
* prediction method (needed for huffyuv)
* - encoding: Set by user.
* - decoding: unused
*/
public native int prediction_method(); public native AVCodecContext prediction_method(int prediction_method);
public static final int
FF_PRED_LEFT = 0,
FF_PRED_PLANE = 1,
FF_PRED_MEDIAN = 2;
/**
* slice offsets in the frame in bytes
* - encoding: Set/allocated by libavcodec.
* - decoding: Set/allocated by user (or NULL).
*/
public native IntPointer slice_offset(); public native AVCodecContext slice_offset(IntPointer slice_offset);
/**
* sample aspect ratio (0 if unknown)
* That is the width of a pixel divided by the height of the pixel.
* Numerator and denominator must be relatively prime and smaller than 256 for some video standards.
* - encoding: Set by user.
* - decoding: Set by libavcodec.
*/
@ByRef public native AVRational sample_aspect_ratio();
public native AVCodecContext sample_aspect_ratio(AVRational sample_aspect_ratio);
/**
* motion estimation comparison function
* - encoding: Set by user.
* - decoding: unused
*/
public native int me_cmp(); public native AVCodecContext me_cmp(int me_cmp);
/**
* subpixel motion estimation comparison function
* - encoding: Set by user.
* - decoding: unused
*/
public native int me_sub_cmp(); public native AVCodecContext me_sub_cmp(int me_sub_cmp);
/**
* macroblock comparison function (not supported yet)
* - encoding: Set by user.
* - decoding: unused
*/
public native int mb_cmp(); public native AVCodecContext mb_cmp(int mb_cmp);
/**
* interlaced DCT comparison function
* - encoding: Set by user.
* - decoding: unused
*/
public native int ildct_cmp(); public native AVCodecContext ildct_cmp(int ildct_cmp);
public static final int
FF_CMP_SAD = 0,
FF_CMP_SSE = 1,
FF_CMP_SATD = 2,
FF_CMP_DCT = 3,
FF_CMP_PSNR = 4,
FF_CMP_BIT = 5,
FF_CMP_RD = 6,
FF_CMP_ZERO = 7,
FF_CMP_VSAD = 8,
FF_CMP_VSSE = 9,
FF_CMP_NSSE = 10,
FF_CMP_W53 = 11,
FF_CMP_W97 = 12,
FF_CMP_DCTMAX = 13,
FF_CMP_DCT264 = 14,
FF_CMP_CHROMA = 256;
/**
* ME diamond size & shape
* - encoding: Set by user.
* - decoding: unused
*/
public native int dia_size(); public native AVCodecContext dia_size(int dia_size);
/**
* amount of previous MV predictors (2a+1 x 2a+1 square)
* - encoding: Set by user.
* - decoding: unused
*/
public native int last_predictor_count(); public native AVCodecContext last_predictor_count(int last_predictor_count);
/**
* prepass for motion estimation
* - encoding: Set by user.
* - decoding: unused
*/
public native int pre_me(); public native AVCodecContext pre_me(int pre_me);
/**
* motion estimation prepass comparison function
* - encoding: Set by user.
* - decoding: unused
*/
public native int me_pre_cmp(); public native AVCodecContext me_pre_cmp(int me_pre_cmp);
/**
* ME prepass diamond size & shape
* - encoding: Set by user.
* - decoding: unused
*/
public native int pre_dia_size(); public native AVCodecContext pre_dia_size(int pre_dia_size);
/**
* subpel ME quality
* - encoding: Set by user.
* - decoding: unused
*/
public native int me_subpel_quality(); public native AVCodecContext me_subpel_quality(int me_subpel_quality);
/**
* DTG active format information (additional aspect ratio
* information only used in DVB MPEG-2 transport streams)
* 0 if not set.
*
* - encoding: unused
* - decoding: Set by decoder.
*/
public native int dtg_active_format(); public native AVCodecContext dtg_active_format(int dtg_active_format);
public static final int
FF_DTG_AFD_SAME = 8,
FF_DTG_AFD_4_3 = 9,
FF_DTG_AFD_16_9 = 10,
FF_DTG_AFD_14_9 = 11,
FF_DTG_AFD_4_3_SP_14_9 = 13,
FF_DTG_AFD_16_9_SP_14_9 = 14,
FF_DTG_AFD_SP_4_3 = 15;
/**
* maximum motion estimation search range in subpel units
* If 0 then no limit.
*
* - encoding: Set by user.
* - decoding: unused
*/
public native int me_range(); public native AVCodecContext me_range(int me_range);
/**
* intra quantizer bias
* - encoding: Set by user.
* - decoding: unused
*/
public native int intra_quant_bias(); public native AVCodecContext intra_quant_bias(int intra_quant_bias);
public static final int FF_DEFAULT_QUANT_BIAS = 999999;
/**
* inter quantizer bias
* - encoding: Set by user.
* - decoding: unused
*/
public native int inter_quant_bias(); public native AVCodecContext inter_quant_bias(int inter_quant_bias);
/**
* slice flags
* - encoding: unused
* - decoding: Set by user.
*/
public native int slice_flags(); public native AVCodecContext slice_flags(int slice_flags);
public static final int
SLICE_FLAG_CODED_ORDER = 0x0001, ///< draw_horiz_band() is called in coded order instead of display
SLICE_FLAG_ALLOW_FIELD = 0x0002, ///< allow draw_horiz_band() with field slices (MPEG2 field pics)
SLICE_FLAG_ALLOW_PLANE = 0x0004; ///< allow draw_horiz_band() with 1 component at a time (SVQ1)
/**
* XVideo Motion Acceleration
* - encoding: forbidden
* - decoding: set by decoder
*/
public native int xvmc_acceleration(); public native AVCodecContext xvmc_acceleration(int xvmc_acceleration);
/**
* macroblock decision mode
* - encoding: Set by user.
* - decoding: unused
*/
public native int mb_decision(); public native AVCodecContext mb_decision(int mb_decision);
public static final int
FF_MB_DECISION_SIMPLE = 0, ///< uses mb_cmp
FF_MB_DECISION_BITS = 1, ///< chooses the one which needs the fewest bits
FF_MB_DECISION_RD = 2; ///< rate distortion
/**
* custom intra quantization matrix
* - encoding: Set by user, can be NULL.
* - decoding: Set by libavcodec.
*/
@Cast("uint16_t*")
public native ShortPointer intra_matrix(); public native AVCodecContext intra_matrix(ShortPointer intra_matrix);
/**
* custom inter quantization matrix
* - encoding: Set by user, can be NULL.
* - decoding: Set by libavcodec.
*/
@Cast("uint16_t*")
public native ShortPointer inter_matrix(); public native AVCodecContext inter_matrix(ShortPointer inter_matrix);
/**
* scene change detection threshold
* 0 is default, larger means fewer detected scene changes.
* - encoding: Set by user.
* - decoding: unused
*/
public native int scenechange_threshold(); public native AVCodecContext scenechange_threshold(int scenechange_threshold);
/**
* noise reduction strength
* - encoding: Set by user.
* - decoding: unused
*/
public native int noise_reduction(); public native AVCodecContext noise_reduction(int noise_reduction);
/**
* Motion estimation threshold below which no motion estimation is
* performed, but instead the user specified motion vectors are used.
*
* - encoding: Set by user.
* - decoding: unused
*/
public native int me_threshold(); public native AVCodecContext me_threshold(int me_threshold);
/**
* Macroblock threshold below which the user specified macroblock types will be used.
* - encoding: Set by user.
* - decoding: unused
*/
public native int mb_threshold(); public native AVCodecContext mb_threshold(int mb_threshold);
/**
* precision of the intra DC coefficient - 8
* - encoding: Set by user.
* - decoding: unused
*/
public native int intra_dc_precision(); public native AVCodecContext intra_dc_precision(int intra_dc_precision);
/**
* Number of macroblock rows at the top which are skipped.
* - encoding: unused
* - decoding: Set by user.
*/
public native int skip_top(); public native AVCodecContext skip_top(int skip_top);
/**
* Number of macroblock rows at the bottom which are skipped.
* - encoding: unused
* - decoding: Set by user.
*/
public native int skip_bottom(); public native AVCodecContext skip_bottom(int skip_bottom);
/**
* Border processing masking, raises the quantizer for mbs on the borders
* of the picture.
* - encoding: Set by user.
* - decoding: unused
*/
public native float border_masking(); public native AVCodecContext border_masking(float border_masking);
/**
* minimum macroblock (MB) Lagrange multiplier
* - encoding: Set by user.
* - decoding: unused
*/
public native int mb_lmin(); public native AVCodecContext mb_lmin(int mb_lmin);
/**
* maximum macroblock (MB) Lagrange multiplier
* - encoding: Set by user.
* - decoding: unused
*/
public native int mb_lmax(); public native AVCodecContext mb_lmax(int mb_lmax);
/**
*
* - encoding: Set by user.
* - decoding: unused
*/
public native int me_penalty_compensation(); public native AVCodecContext me_penalty_compensation(int me_penalty_compensation);
/**
*
* - encoding: Set by user.
* - decoding: unused
*/
public native int bidir_refine(); public native AVCodecContext bidir_refine(int bidir_refine);
/**
*
* - encoding: Set by user.
* - decoding: unused
*/
public native int brd_scale(); public native AVCodecContext brd_scale(int brd_scale);
/**
* minimum GOP size
* - encoding: Set by user.
* - decoding: unused
*/
public native int keyint_min(); public native AVCodecContext keyint_min(int keyint_min);
/**
* number of reference frames
* - encoding: Set by user.
* - decoding: Set by lavc.
*/
public native int refs(); public native AVCodecContext refs(int refs);
/**
* chroma qp offset from luma
* - encoding: Set by user.
* - decoding: unused
*/
public native int chromaoffset(); public native AVCodecContext chromaoffset(int chromaoffset);
/**
* Multiplied by qscale for each frame and added to scene_change_score.
* - encoding: Set by user.
* - decoding: unused
*/
public native int scenechange_factor(); public native AVCodecContext scenechange_factor(int scenechange_factor);
/**
*
* Note: Value depends upon the compare function used for fullpel ME.
* - encoding: Set by user.
* - decoding: unused
*/
public native int mv0_threshold(); public native AVCodecContext mv0_threshold(int mv0_threshold);
/**
* Adjust sensitivity of b_frame_strategy 1.
* - encoding: Set by user.
* - decoding: unused
*/
public native int b_sensitivity(); public native AVCodecContext b_sensitivity(int b_sensitivity);
/**
* Chromaticity coordinates of the source primaries.
* - encoding: Set by user
* - decoding: Set by libavcodec
*/
@Cast("AVColorPrimaries")
public native int color_primaries(); public native AVCodecContext color_primaries(int color_primaries);
/**
* Color Transfer Characteristic.
* - encoding: Set by user
* - decoding: Set by libavcodec
*/
@Cast("AVColorTransferCharacteristic")
public native int color_trc(); public native AVCodecContext color_trc(int color_trc);
/**
* YUV colorspace type.
* - encoding: Set by user
* - decoding: Set by libavcodec
*/
@Cast("AVColorSpace")
public native int colorspace(); public native AVCodecContext colorspace(int colorspace);
/**
* MPEG vs JPEG YUV range.
* - encoding: Set by user
* - decoding: Set by libavcodec
*/
@Cast("AVColorRange")
public native int color_range(); public native AVCodecContext color_range(int color_range);
/**
* This defines the location of chroma samples.
* - encoding: Set by user
* - decoding: Set by libavcodec
*/
@Cast("AVChromaLocation")
public native int chroma_sample_location(); public native AVCodecContext chroma_sample_location(int chroma_sample_location);
/**
* Number of slices.
* Indicates number of picture subdivisions. Used for parallelized
* decoding.
* - encoding: Set by user
* - decoding: unused
*/
public native int slices(); public native AVCodecContext slices(int slices);
/** Field order
* - encoding: set by libavcodec
* - decoding: Set by libavcodec
*/
@Cast("AVFieldOrder")
public native int field_order(); public native AVCodecContext field_order(int field_order);
/* audio only */
public native int sample_rate(); public native AVCodecContext sample_rate(int sample_rate); ///< samples per second
public native int channels(); public native AVCodecContext channels(int channels); ///< number of audio channels
/**
* audio sample format
* - encoding: Set by user.
* - decoding: Set by libavcodec.
*/
@Cast("AVSampleFormat")
public native int sample_fmt(); public native AVCodecContext sample_fmt(int sample_fmt); ///< sample format
/* The following data should not be initialized. */
/**
* Samples per packet, initialized when calling 'init'.
*/
public native int frame_size(); public native AVCodecContext frame_size(int frame_size);
/**
* Frame counter, set by libavcodec.
*
* - decoding: total number of frames returned from the decoder so far.
* - encoding: total number of frames passed to the encoder so far.
*
* @note the counter is not incremented if encoding/decoding resulted in
* an error.
*/
public native int frame_number(); public native AVCodecContext frame_number(int frame_number);
/**
* number of bytes per packet if constant and known or 0
* Used by some WAV based audio codecs.
*/
public native int block_align(); public native AVCodecContext block_align(int block_align);
/**
* Audio cutoff bandwidth (0 means "automatic")
* - encoding: Set by user.
* - decoding: unused
*/
public native int cutoff(); public native AVCodecContext cutoff(int cutoff);
/**
* Audio channel layout.
* - encoding: set by user.
* - decoding: set by user, may be overwritten by libavcodec.
*/
@Cast("uint64_t")
public native long channel_layout(); public native AVCodecContext channel_layout(long channel_layout);
/**
* Request decoder to use this channel layout if it can (0 for default)
* - encoding: unused
* - decoding: Set by user.
*/
@Cast("uint64_t")
public native long request_channel_layout(); public native AVCodecContext request_channel_layout(long request_channel_layout);
/**
* Type of service that the audio stream conveys.
* - encoding: Set by user.
* - decoding: Set by libavcodec.
*/
@Cast("AVAudioServiceType")
public native int audio_service_type(); public native AVCodecContext audio_service_type(int audio_service_type);
/**
* desired sample format
* - encoding: Not used.
* - decoding: Set by user.
* Decoder will decode to this format if it can.
*/
@Cast("AVSampleFormat")
public native int request_sample_fmt(); public native AVCodecContext request_sample_fmt(int request_sample_fmt);
/**
* Called at the beginning of each frame to get a buffer for it.
*
* The function will set AVFrame.data[], AVFrame.linesize[].
* AVFrame.extended_data[] must also be set, but it should be the same as
* AVFrame.data[] except for planar audio with more channels than can fit
* in AVFrame.data[]. In that case, AVFrame.data[] shall still contain as
* many data pointers as it can hold.
*
* if CODEC_CAP_DR1 is not set then get_buffer() must call
* avcodec_default_get_buffer() instead of providing buffers allocated by
* some other means.
*
* AVFrame.data[] should be 32- or 16-byte-aligned unless the CPU doesn't
* need it. avcodec_default_get_buffer() aligns the output buffer properly,
* but if get_buffer() is overridden then alignment considerations should
* be taken into account.
*
* @see avcodec_default_get_buffer()
*
* Video:
*
* If pic.reference is set then the frame will be read later by libavcodec.
* avcodec_align_dimensions2() should be used to find the required width and
* height, as they normally need to be rounded up to the next multiple of 16.
*
* If frame multithreading is used and thread_safe_callbacks is set,
* it may be called from a different thread, but not from more than one at
* once. Does not need to be reentrant.
*
* @see release_buffer(), reget_buffer()
* @see avcodec_align_dimensions2()
*
* Audio:
*
* Decoders request a buffer of a particular size by setting
* AVFrame.nb_samples prior to calling get_buffer(). The decoder may,
* however, utilize only part of the buffer by setting AVFrame.nb_samples
* to a smaller value in the output frame.
*
* Decoders cannot use the buffer after returning from
* avcodec_decode_audio4(), so they will not call release_buffer(), as it
* is assumed to be released immediately upon return.
*
* As a convenience, av_samples_get_buffer_size() and
* av_samples_fill_arrays() in libavutil may be used by custom get_buffer()
* functions to find the required data size and to fill data pointers and
* linesize. In AVFrame.linesize, only linesize[0] may be set for audio
* since all planes must be the same size.
*
* @see av_samples_get_buffer_size(), av_samples_fill_arrays()
*
* - encoding: unused
* - decoding: Set by libavcodec, user can override.
*/
/** JavaCPP mapping of the native {@code get_buffer} callback documented above. */
public static class Get_buffer extends FunctionPointer {
static { load(); } // JavaCPP: make sure the native library is loaded before any allocation
public Get_buffer(Pointer p) { super(p); } // wrap an existing native function pointer
protected Get_buffer() { allocate(); } // allocate a new native callback trampoline
protected final native void allocate();
/** Invoked by libavcodec; must fill in the buffers of {@code pic} and return 0 on success. */
public native int call(AVCodecContext c, AVFrame pic);
}
public native Get_buffer get_buffer(); public native AVCodecContext get_buffer(Get_buffer get_buffer);
/**
* Called to release buffers which were allocated with get_buffer.
* A released buffer can be reused in get_buffer().
* pic.data[*] must be set to NULL.
* May be called from a different thread if frame multithreading is used,
* but not by more than one thread at once, so does not need to be reentrant.
* - encoding: unused
* - decoding: Set by libavcodec, user can override.
*/
/** JavaCPP mapping of the native {@code release_buffer} callback documented above. */
public static class Release_buffer extends FunctionPointer {
static { load(); } // JavaCPP: make sure the native library is loaded before any allocation
public Release_buffer(Pointer p) { super(p); } // wrap an existing native function pointer
protected Release_buffer() { allocate(); } // allocate a new native callback trampoline
protected final native void allocate();
/** Invoked by libavcodec to release buffers previously handed out by get_buffer(). */
public native void call(AVCodecContext c, AVFrame pic);
}
public native Release_buffer release_buffer(); public native AVCodecContext release_buffer(Release_buffer release_buffer);
/**
* Called at the beginning of a frame to get cr buffer for it.
* Buffer type (size, hints) must be the same. libavcodec won't check it.
* libavcodec will pass previous buffer in pic, function should return
* same buffer or new buffer with old frame "painted" into it.
* If pic.data[0] == NULL must behave like get_buffer().
* if CODEC_CAP_DR1 is not set then reget_buffer() must call
* avcodec_default_reget_buffer() instead of providing buffers allocated by
* some other means.
* - encoding: unused
* - decoding: Set by libavcodec, user can override.
*/
/** JavaCPP mapping of the native {@code reget_buffer} callback documented above. */
public static class Reget_buffer extends FunctionPointer {
static { load(); } // JavaCPP: make sure the native library is loaded before any allocation
public Reget_buffer(Pointer p) { super(p); } // wrap an existing native function pointer
protected Reget_buffer() { allocate(); } // allocate a new native callback trampoline
protected final native void allocate();
/** Invoked by libavcodec; see the class Javadoc for the required buffer-reuse semantics. */
public native int call(AVCodecContext c, AVFrame pic);
}
public native Reget_buffer reget_buffer(); public native AVCodecContext reget_buffer(Reget_buffer reget_buffer);
/* - encoding parameters */
/**
 * amount of qscale change between easy & hard scenes (0.0-1.0)
 */
public native float qcompress(); public native AVCodecContext qcompress(float qcompress);
/**
 * amount of qscale smoothing over time (0.0-1.0)
 */
public native float qblur(); public native AVCodecContext qblur(float qblur);
/**
* minimum quantizer
* - encoding: Set by user.
* - decoding: unused
*/
public native int qmin(); public native AVCodecContext qmin(int qmin);
/**
* maximum quantizer
* - encoding: Set by user.
* - decoding: unused
*/
public native int qmax(); public native AVCodecContext qmax(int qmax);
/**
* maximum quantizer difference between frames
* - encoding: Set by user.
* - decoding: unused
*/
public native int max_qdiff(); public native AVCodecContext max_qdiff(int max_qdiff);
/**
* ratecontrol qmin qmax limiting method
* 0-> clipping, 1-> use a nice continuous function to limit qscale within qmin/qmax.
* - encoding: Set by user.
* - decoding: unused
*/
public native float rc_qsquish(); public native AVCodecContext rc_qsquish(float rc_qsquish);
/** ratecontrol qscale modulation amplitude — NOTE(review): undocumented in this binding; confirm semantics against avcodec.h */
public native float rc_qmod_amp(); public native AVCodecContext rc_qmod_amp(float rc_qmod_amp);
/** ratecontrol qscale modulation frequency — NOTE(review): undocumented in this binding; confirm semantics against avcodec.h */
public native int rc_qmod_freq(); public native AVCodecContext rc_qmod_freq(int rc_qmod_freq);
/**
* decoder bitstream buffer size
* - encoding: Set by user.
* - decoding: unused
*/
public native int rc_buffer_size(); public native AVCodecContext rc_buffer_size(int rc_buffer_size);
/**
* ratecontrol override, see RcOverride
* - encoding: Allocated/set/freed by user.
* - decoding: unused
*/
public native int rc_override_count(); public native AVCodecContext rc_override_count(int rc_override_count);
public native RcOverride rc_override(); public native AVCodecContext rc_override(RcOverride rc_override);
/**
* rate control equation
* - encoding: Set by user
* - decoding: unused
*/
@Cast("const char*")
public native BytePointer rc_eq(); public native AVCodecContext rc_eq(BytePointer rc_eq);
/**
* maximum bitrate
* - encoding: Set by user.
* - decoding: unused
*/
public native int rc_max_rate(); public native AVCodecContext rc_max_rate(int rc_max_rate);
/**
* minimum bitrate
* - encoding: Set by user.
* - decoding: unused
*/
public native int rc_min_rate(); public native AVCodecContext rc_min_rate(int rc_min_rate);
/** ratecontrol buffer aggressivity — NOTE(review): undocumented here; FFmpeg headers describe it as a legacy tuning knob, confirm against avcodec.h */
public native float rc_buffer_aggressivity(); public native AVCodecContext rc_buffer_aggressivity(float rc_buffer_aggressivity);
/**
* initial complexity for pass1 ratecontrol
* - encoding: Set by user.
* - decoding: unused
*/
public native float rc_initial_cplx(); public native AVCodecContext rc_initial_cplx(float rc_initial_cplx);
/**
* Ratecontrol attempt to use, at maximum, <value> of what can be used without an underflow.
* - encoding: Set by user.
* - decoding: unused.
*/
public native float rc_max_available_vbv_use(); public native AVCodecContext rc_max_available_vbv_use(float rc_max_available_vbv_use);
/**
* Ratecontrol attempt to use, at least, <value> times the amount needed to prevent a vbv overflow.
* - encoding: Set by user.
* - decoding: unused.
*/
public native float rc_min_vbv_overflow_use(); public native AVCodecContext rc_min_vbv_overflow_use(float rc_min_vbv_overflow_use);
/**
* Number of bits which should be loaded into the rc buffer before decoding starts.
* - encoding: Set by user.
* - decoding: unused
*/
public native int rc_initial_buffer_occupancy(); public native AVCodecContext rc_initial_buffer_occupancy(int rc_initial_buffer_occupancy);
public static final int
FF_CODER_TYPE_VLC = 0,
FF_CODER_TYPE_AC = 1,
FF_CODER_TYPE_RAW = 2,
FF_CODER_TYPE_RLE = 3,
FF_CODER_TYPE_DEFLATE = 4;
/**
* coder type
* - encoding: Set by user.
* - decoding: unused
*/
public native int coder_type(); public native AVCodecContext coder_type(int coder_type);
/**
* context model
* - encoding: Set by user.
* - decoding: unused
*/
public native int context_model(); public native AVCodecContext context_model(int context_model);
/**
* minimum Lagrange multiplier
* - encoding: Set by user.
* - decoding: unused
*/
public native int lmin(); public native AVCodecContext lmin(int lmin);
/**
* maximum Lagrange multiplier
* - encoding: Set by user.
* - decoding: unused
*/
public native int lmax(); public native AVCodecContext lmax(int lmax);
/**
* frame skip threshold
* - encoding: Set by user.
* - decoding: unused
*/
public native int frame_skip_threshold(); public native AVCodecContext frame_skip_threshold(int frame_skip_threshold);
/**
* frame skip factor
* - encoding: Set by user.
* - decoding: unused
*/
public native int frame_skip_factor(); public native AVCodecContext frame_skip_factor(int frame_skip_factor);
/**
* frame skip exponent
* - encoding: Set by user.
* - decoding: unused
*/
public native int frame_skip_exp(); public native AVCodecContext frame_skip_exp(int frame_skip_exp);
/**
* frame skip comparison function
* - encoding: Set by user.
* - decoding: unused
*/
public native int frame_skip_cmp(); public native AVCodecContext frame_skip_cmp(int frame_skip_cmp);
/**
* trellis RD quantization
* - encoding: Set by user.
* - decoding: unused
*/
public native int trellis(); public native AVCodecContext trellis(int trellis);
/**
* - encoding: Set by user.
* - decoding: unused
*/
public native int min_prediction_order(); public native AVCodecContext min_prediction_order(int min_prediction_order);
/**
* - encoding: Set by user.
* - decoding: unused
*/
public native int max_prediction_order(); public native AVCodecContext max_prediction_order(int max_prediction_order);
/**
* GOP timecode frame start number
* - encoding: Set by user, in non drop frame format
* - decoding: Set by libavcodec (timecode in the 25 bits format, -1 if unset)
*/
public native long timecode_frame_start(); public native AVCodecContext timecode_frame_start(long timecode_frame_start);
/**
 * The RTP callback: this function is called every time the encoder has a
 * packet to send. It depends on the encoder whether the data starts with a
 * Start Code (it should — H.263 does). {@code mb_nb} contains the number of
 * macroblocks encoded in the RTP payload.
 */
public static class Rtp_callback extends FunctionPointer {
static { load(); } // JavaCPP: make sure the native library is loaded before any allocation
public Rtp_callback(Pointer p) { super(p); } // wrap an existing native function pointer
protected Rtp_callback() { allocate(); } // allocate a new native callback trampoline
protected final native void allocate();
/** Invoked by the encoder with each RTP payload chunk of {@code size} bytes. */
public native void call(AVCodecContext avctx, Pointer data, int size, int mb_nb);
}
public native Rtp_callback rtp_callback(); public native AVCodecContext rtp_callback(Rtp_callback rtp_callback);
/* The size of the RTP payload: the coder will */
/* do its best to deliver a chunk with size */
/* below rtp_payload_size, the chunk will start */
/* with a start code on some codecs like H.263. */
/* This doesn't take account of any particular */
/* headers inside the transmitted RTP payload. */
public native int rtp_payload_size(); public native AVCodecContext rtp_payload_size(int rtp_payload_size);
/* statistics, used for 2-pass encoding — per-frame bit/count counters mirrored
 * from the corresponding AVCodecContext fields in avcodec.h (names match the
 * native struct fields; see avcodec.h for the exact semantics of each). */
public native int mv_bits(); public native AVCodecContext mv_bits(int mv_bits);
public native int header_bits(); public native AVCodecContext header_bits(int header_bits);
public native int i_tex_bits(); public native AVCodecContext i_tex_bits(int i_tex_bits);
public native int p_tex_bits(); public native AVCodecContext p_tex_bits(int p_tex_bits);
public native int i_count(); public native AVCodecContext i_count(int i_count);
public native int p_count(); public native AVCodecContext p_count(int p_count);
public native int skip_count(); public native AVCodecContext skip_count(int skip_count);
public native int misc_bits(); public native AVCodecContext misc_bits(int misc_bits);
/**
* number of bits used for the previously encoded frame
* - encoding: Set by libavcodec.
* - decoding: unused
*/
public native int frame_bits(); public native AVCodecContext frame_bits(int frame_bits);
/**
* pass1 encoding statistics output buffer
* - encoding: Set by libavcodec.
* - decoding: unused
*/
@Cast("char*")
public native BytePointer stats_out(); public native AVCodecContext stats_out(BytePointer stats_out);
/**
* pass2 encoding statistics input buffer
* Concatenated stuff from stats_out of pass1 should be placed here.
* - encoding: Allocated/set/freed by user.
* - decoding: unused
*/
@Cast("char*")
public native BytePointer stats_in(); public native AVCodecContext stats_in(BytePointer stats_in);
/**
* Work around bugs in encoders which sometimes cannot be detected automatically.
* - encoding: Set by user
* - decoding: Set by user
*/
public native int workaround_bugs(); public native AVCodecContext workaround_bugs(int workaround_bugs);
public static final int
FF_BUG_AUTODETECT = 1, ///< autodetection
FF_BUG_OLD_MSMPEG4 = 2,
FF_BUG_XVID_ILACE = 4,
FF_BUG_UMP4 = 8,
FF_BUG_NO_PADDING = 16,
FF_BUG_AMV = 32,
FF_BUG_AC_VLC = 0, ///< Will be removed, libavcodec can now handle these non-compliant files by default.
FF_BUG_QPEL_CHROMA = 64,
FF_BUG_STD_QPEL = 128,
FF_BUG_QPEL_CHROMA2 = 256,
FF_BUG_DIRECT_BLOCKSIZE = 512,
FF_BUG_EDGE = 1024,
FF_BUG_HPEL_CHROMA = 2048,
FF_BUG_DC_CLIP = 4096,
FF_BUG_MS = 8192, ///< Work around various bugs in Microsoft's broken decoders.
FF_BUG_TRUNCATED = 16384;
/**
* strictly follow the standard (MPEG4, ...).
* - encoding: Set by user.
* - decoding: Set by user.
* Setting this to STRICT or higher means the encoder and decoder will
* generally do stupid things, whereas setting it to unofficial or lower
* will mean the encoder might produce output that is not supported by all
* spec-compliant decoders. Decoders don't differentiate between normal,
* unofficial and experimental (that is, they always try to decode things
* when they can) unless they are explicitly asked to behave stupidly
* (=strictly conform to the specs)
*/
public native int strict_std_compliance(); public native AVCodecContext strict_std_compliance(int strict_std_compliance);
public static final int
FF_COMPLIANCE_VERY_STRICT = 2, ///< Strictly conform to an older more strict version of the spec or reference software.
FF_COMPLIANCE_STRICT = 1, ///< Strictly conform to all the things in the spec no matter what consequences.
FF_COMPLIANCE_NORMAL = 0,
FF_COMPLIANCE_UNOFFICIAL = -1, ///< Allow unofficial extensions
FF_COMPLIANCE_EXPERIMENTAL = -2; ///< Allow nonstandardized experimental things.
/**
* error concealment flags
* - encoding: unused
* - decoding: Set by user.
*/
public native int error_concealment(); public native AVCodecContext error_concealment(int error_concealment);
public static final int
FF_EC_GUESS_MVS = 1,
FF_EC_DEBLOCK = 2;
/**
* debug
* - encoding: Set by user.
* - decoding: Set by user.
*/
public native int debug(); public native AVCodecContext debug(int debug);
public static final int
FF_DEBUG_PICT_INFO = 1,
FF_DEBUG_RC = 2,
FF_DEBUG_BITSTREAM = 4,
FF_DEBUG_MB_TYPE = 8,
FF_DEBUG_QP = 16,
FF_DEBUG_MV = 32,
FF_DEBUG_DCT_COEFF = 0x00000040,
FF_DEBUG_SKIP = 0x00000080,
FF_DEBUG_STARTCODE = 0x00000100,
FF_DEBUG_PTS = 0x00000200,
FF_DEBUG_ER = 0x00000400,
FF_DEBUG_MMCO = 0x00000800,
FF_DEBUG_BUGS = 0x00001000,
FF_DEBUG_VIS_QP = 0x00002000,
FF_DEBUG_VIS_MB_TYPE = 0x00004000,
FF_DEBUG_BUFFERS = 0x00008000,
FF_DEBUG_THREADS = 0x00010000;
/**
* debug
* - encoding: Set by user.
* - decoding: Set by user.
*/
public native int debug_mv(); public native AVCodecContext debug_mv(int debug_mv);
public static final int
FF_DEBUG_VIS_MV_P_FOR = 0x00000001, //visualize forward predicted MVs of P frames
FF_DEBUG_VIS_MV_B_FOR = 0x00000002, //visualize forward predicted MVs of B frames
FF_DEBUG_VIS_MV_B_BACK = 0x00000004; //visualize backward predicted MVs of B frames
/**
* Error recognition; may misdetect some more or less valid parts as errors.
* - encoding: unused
* - decoding: Set by user.
*/
public native int err_recognition(); public native AVCodecContext err_recognition(int err_recognition);
public static final int
AV_EF_CRCCHECK = (1<<0),
AV_EF_BITSTREAM = (1<<1),
AV_EF_BUFFER = (1<<2),
AV_EF_EXPLODE = (1<<3),
AV_EF_CAREFUL = (1<<16),
AV_EF_COMPLIANT = (1<<17),
AV_EF_AGGRESSIVE = (1<<18);
/**
* Hardware accelerator in use
* - encoding: unused.
* - decoding: Set by libavcodec
*/
public native AVHWAccel hwaccel(); public native AVCodecContext hwaccel(AVHWAccel hwaccel);
/**
* Hardware accelerator context.
* For some hardware accelerators, a global context needs to be
* provided by the user. In that case, this holds display-dependent
* data FFmpeg cannot instantiate itself. Please refer to the
* FFmpeg HW accelerator documentation to know how to fill this
* in. e.g. for VA API, this is a struct vaapi_context.
* - encoding: unused
* - decoding: Set by user
*/
public native Pointer hwaccel_context(); public native AVCodecContext hwaccel_context(Pointer hwaccel_context);
/**
* error
* - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR.
* - decoding: unused
*/
@Cast("uint64_t") // uint64_t error[AV_NUM_DATA_POINTERS];
public native long error(int i); public native AVCodecContext error(int i, long error);
/**
* DCT algorithm, see FF_DCT_* below
* - encoding: Set by user.
* - decoding: unused
*/
public native int dct_algo(); public native AVCodecContext dct_algo(int dct_algo);
public static final int
FF_DCT_AUTO = 0,
FF_DCT_FASTINT = 1,
FF_DCT_INT = 2,
FF_DCT_MMX = 3,
FF_DCT_ALTIVEC = 5,
FF_DCT_FAAN = 6;
/**
* IDCT algorithm, see FF_IDCT_* below.
* - encoding: Set by user.
* - decoding: Set by user.
*/
public native int idct_algo(); public native AVCodecContext idct_algo(int idct_algo);
public static final int
FF_IDCT_AUTO = 0,
FF_IDCT_INT = 1,
FF_IDCT_SIMPLE = 2,
FF_IDCT_SIMPLEMMX = 3,
FF_IDCT_LIBMPEG2MMX = 4,
FF_IDCT_MMI = 5,
FF_IDCT_ARM = 7,
FF_IDCT_ALTIVEC = 8,
FF_IDCT_SH4 = 9,
FF_IDCT_SIMPLEARM = 10,
FF_IDCT_H264 = 11,
FF_IDCT_VP3 = 12,
FF_IDCT_IPP = 13,
FF_IDCT_XVIDMMX = 14,
FF_IDCT_CAVS = 15,
FF_IDCT_SIMPLEARMV5TE = 16,
FF_IDCT_SIMPLEARMV6 = 17,
FF_IDCT_SIMPLEVIS = 18,
FF_IDCT_WMV2 = 19,
FF_IDCT_FAAN = 20,
FF_IDCT_EA = 21,
FF_IDCT_SIMPLENEON = 22,
FF_IDCT_SIMPLEALPHA = 23,
FF_IDCT_BINK = 24;
// JavaCPP accessor pairs: the no-argument method reads the underlying C struct
// field; the one-argument overload writes it and returns this AVCodecContext
// so that setter calls can be chained.
/**
 * bits per sample/pixel from the demuxer (needed for huffyuv).
 * - encoding: Set by libavcodec.
 * - decoding: Set by user.
 */
public native int bits_per_coded_sample(); public native AVCodecContext bits_per_coded_sample(int bits_per_coded_sample);
/**
 * Bits per sample/pixel of internal libavcodec pixel/sample format.
 * - encoding: set by user.
 * - decoding: set by libavcodec.
 */
public native int bits_per_raw_sample(); public native AVCodecContext bits_per_raw_sample(int bits_per_raw_sample);
/**
 * low resolution decoding, 1-> 1/2 size, 2->1/4 size
 * - encoding: unused
 * - decoding: Set by user.
 */
public native int lowres(); public native AVCodecContext lowres(int lowres);
/**
 * the picture in the bitstream
 * - encoding: Set by libavcodec.
 * - decoding: Set by libavcodec.
 */
public native AVFrame coded_frame(); public native AVCodecContext coded_frame(AVFrame coded_frame);
/**
 * thread count
 * is used to decide how many independent tasks should be passed to execute()
 * - encoding: Set by user.
 * - decoding: Set by user.
 */
public native int thread_count(); public native AVCodecContext thread_count(int thread_count);
/**
 * Which multithreading methods to use.
 * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread,
 * so clients which cannot provide future frames should not use it.
 *
 * - encoding: Set by user, otherwise the default is used.
 * - decoding: Set by user, otherwise the default is used.
 */
public native int thread_type(); public native AVCodecContext thread_type(int thread_type);
// Bit flags used in thread_type() and active_thread_type().
public static final int
FF_THREAD_FRAME = 1, ///< Decode more than one frame at once
FF_THREAD_SLICE = 2; ///< Decode more than one part of a single frame at once
/**
 * Which multithreading methods are in use by the codec.
 * - encoding: Set by libavcodec.
 * - decoding: Set by libavcodec.
 */
public native int active_thread_type(); public native AVCodecContext active_thread_type(int active_thread_type);
/**
 * Set by the client if its custom get_buffer() callback can be called
 * synchronously from another thread, which allows faster multithreaded decoding.
 * draw_horiz_band() will be called from other threads regardless of this setting.
 * Ignored if the default get_buffer() is used.
 * - encoding: Set by user.
 * - decoding: Set by user.
 */
public native int thread_safe_callbacks(); public native AVCodecContext thread_safe_callbacks(int thread_safe_callbacks);
/**
 * The codec may call this to execute several independent things.
 * It will return only after finishing all tasks.
 * The user may replace this with some multithreaded implementation,
 * the default implementation will execute the parts serially.
 * @param count the number of things to execute
 * - encoding: Set by libavcodec, user can override.
 * - decoding: Set by libavcodec, user can override.
 */
// Maps the C function pointer field int (*execute)(AVCodecContext*, int (*func)(...), void*, int*, int, int).
public static class Execute extends FunctionPointer {
static { load(); }
// The per-task worker callback passed to execute().
public static class Func extends FunctionPointer {
static { load(); }
public Func(Pointer p) { super(p); }
protected Func() { allocate(); }
protected final native void allocate();
public native int call(AVCodecContext c2, Pointer arg);
}
public Execute(Pointer p) { super(p); }
protected Execute() { allocate(); }
protected final native void allocate();
public native int call(AVCodecContext c, Func func, Pointer arg2, IntPointer ret, int count, int size);
}
public native Execute execute(); public native AVCodecContext execute(Execute execute);
/**
 * The codec may call this to execute several independent things.
 * It will return only after finishing all tasks.
 * The user may replace this with some multithreaded implementation,
 * the default implementation will execute the parts serially.
 * Also see avcodec_thread_init and e.g. the --enable-pthread configure option.
 * @param c context passed also to func
 * @param count the number of things to execute
 * @param arg2 argument passed unchanged to func
 * @param ret return values of executed functions, must have space for "count" values. May be NULL.
 * @param func function that will be called count times, with jobnr from 0 to count-1.
 * threadnr will be in the range 0 to c->thread_count-1 < MAX_THREADS and so that no
 * two instances of func executing at the same time will have the same threadnr.
 * @return always 0 currently, but code should handle a future improvement where when any call to func
 * returns < 0 no further calls to func may be done and < 0 is returned.
 * - encoding: Set by libavcodec, user can override.
 * - decoding: Set by libavcodec, user can override.
 */
// Like Execute, but the worker callback additionally receives jobnr/threadnr.
public static class Execute2 extends FunctionPointer {
static { load(); }
// The per-job worker callback passed to execute2().
public static class Func2 extends FunctionPointer {
static { load(); }
public Func2(Pointer p) { super(p); }
protected Func2() { allocate(); }
protected final native void allocate();
public native int call(AVCodecContext c2, Pointer arg, int jobnr, int threadnr);
}
public Execute2(Pointer p) { super(p); }
protected Execute2() { allocate(); }
protected final native void allocate();
public native int call(AVCodecContext c, Func2 func2, Pointer arg2, IntPointer ret, int count);
}
public native Execute2 execute2(); public native AVCodecContext execute2(Execute2 execute2);
/**
 * thread opaque
 * Can be used by execute() to store some per AVCodecContext stuff.
 * - encoding: set by execute()
 * - decoding: set by execute()
 */
public native Pointer thread_opaque(); public native AVCodecContext thread_opaque(Pointer thread_opaque);
/**
 * noise vs. sse weight for the nsse comparison function
 * - encoding: Set by user.
 * - decoding: unused
 */
public native int nsse_weight(); public native AVCodecContext nsse_weight(int nsse_weight);
/**
 * profile
 * - encoding: Set by user.
 * - decoding: Set by libavcodec.
 */
public native int profile(); public native AVCodecContext profile(int profile);
// NOTE: profile value spaces overlap between codec families (e.g. both
// FF_PROFILE_AAC_MAIN and FF_PROFILE_MPEG2_422 are 0); a value is only
// meaningful together with the codec in use.
public static final int
FF_PROFILE_UNKNOWN = -99,
FF_PROFILE_RESERVED = -100,
FF_PROFILE_AAC_MAIN = 0,
FF_PROFILE_AAC_LOW = 1,
FF_PROFILE_AAC_SSR = 2,
FF_PROFILE_AAC_LTP = 3,
FF_PROFILE_DTS = 20,
FF_PROFILE_DTS_ES = 30,
FF_PROFILE_DTS_96_24 = 40,
FF_PROFILE_DTS_HD_HRA = 50,
FF_PROFILE_DTS_HD_MA = 60,
FF_PROFILE_MPEG2_422 = 0,
FF_PROFILE_MPEG2_HIGH = 1,
FF_PROFILE_MPEG2_SS = 2,
FF_PROFILE_MPEG2_SNR_SCALABLE = 3,
FF_PROFILE_MPEG2_MAIN = 4,
FF_PROFILE_MPEG2_SIMPLE = 5,
FF_PROFILE_H264_CONSTRAINED = (1<<9), // 8+1; constraint_set1_flag
FF_PROFILE_H264_INTRA = (1<<11), // 8+3; constraint_set3_flag
FF_PROFILE_H264_BASELINE = 66,
FF_PROFILE_H264_CONSTRAINED_BASELINE = (66|FF_PROFILE_H264_CONSTRAINED),
FF_PROFILE_H264_MAIN = 77,
FF_PROFILE_H264_EXTENDED = 88,
FF_PROFILE_H264_HIGH = 100,
FF_PROFILE_H264_HIGH_10 = 110,
FF_PROFILE_H264_HIGH_10_INTRA = (110|FF_PROFILE_H264_INTRA),
FF_PROFILE_H264_HIGH_422 = 122,
FF_PROFILE_H264_HIGH_422_INTRA = (122|FF_PROFILE_H264_INTRA),
FF_PROFILE_H264_HIGH_444 = 144,
FF_PROFILE_H264_HIGH_444_PREDICTIVE = 244,
FF_PROFILE_H264_HIGH_444_INTRA = (244|FF_PROFILE_H264_INTRA),
FF_PROFILE_H264_CAVLC_444 = 44,
FF_PROFILE_VC1_SIMPLE = 0,
FF_PROFILE_VC1_MAIN = 1,
FF_PROFILE_VC1_COMPLEX = 2,
FF_PROFILE_VC1_ADVANCED = 3,
FF_PROFILE_MPEG4_SIMPLE = 0,
FF_PROFILE_MPEG4_SIMPLE_SCALABLE = 1,
FF_PROFILE_MPEG4_CORE = 2,
FF_PROFILE_MPEG4_MAIN = 3,
FF_PROFILE_MPEG4_N_BIT = 4,
FF_PROFILE_MPEG4_SCALABLE_TEXTURE = 5,
FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION = 6,
FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE = 7,
FF_PROFILE_MPEG4_HYBRID = 8,
FF_PROFILE_MPEG4_ADVANCED_REAL_TIME = 9,
FF_PROFILE_MPEG4_CORE_SCALABLE = 10,
FF_PROFILE_MPEG4_ADVANCED_CODING = 11,
FF_PROFILE_MPEG4_ADVANCED_CORE = 12,
FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE = 13,
FF_PROFILE_MPEG4_SIMPLE_STUDIO = 14,
FF_PROFILE_MPEG4_ADVANCED_SIMPLE = 15;
/**
 * level
 * - encoding: Set by user.
 * - decoding: Set by libavcodec.
 */
public native int level(); public native AVCodecContext level(int level);
public static final int FF_LEVEL_UNKNOWN = -99;
/**
 * Skip loop filtering for selected frames (an AVDiscard value).
 * - encoding: unused
 * - decoding: Set by user.
 */
@Cast("AVDiscard")
public native int skip_loop_filter(); public native AVCodecContext skip_loop_filter(int skip_loop_filter);
/**
 * Skip IDCT/dequantization for selected frames (an AVDiscard value).
 * - encoding: unused
 * - decoding: Set by user.
 */
@Cast("AVDiscard")
public native int skip_idct(); public native AVCodecContext skip_idct(int skip_idct);
/**
 * Skip decoding for selected frames (an AVDiscard value).
 * - encoding: unused
 * - decoding: Set by user.
 */
@Cast("AVDiscard")
public native int skip_frame(); public native AVCodecContext skip_frame(int skip_frame);
/**
 * Header containing style information for text subtitles.
 * For SUBTITLE_ASS subtitle type, it should contain the whole ASS
 * [Script Info] and [V4+ Styles] section, plus the [Events] line and
 * the Format line following. It shouldn't include any Dialogue line.
 * - encoding: Set/allocated/freed by user (before avcodec_open2())
 * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2())
 */
@Cast("uint8_t*")
public native BytePointer subtitle_header(); public native AVCodecContext subtitle_header(BytePointer subtitle_header);
// Size in bytes of the buffer returned by subtitle_header().
public native int subtitle_header_size(); public native AVCodecContext subtitle_header_size(int subtitle_header_size);
/**
 * Simulates errors in the bitstream to test error concealment.
 * - encoding: Set by user.
 * - decoding: unused
 */
public native int error_rate(); public native AVCodecContext error_rate(int error_rate);
/**
 * Current packet as passed into the decoder, to avoid having
 * to pass the packet into every function. Currently only valid
 * inside lavc and get/release_buffer callbacks.
 * - decoding: set by avcodec_decode_*, read by get_buffer() for setting pkt_pts
 * - encoding: unused
 */
// Fixed: the setter parameter was mis-named "subtitle_header" (copy-paste from
// the subtitle_header accessor above); parameter names are not part of the
// Java call signature, so this rename is fully backward compatible.
public native AVPacket pkt(); public native AVCodecContext pkt(AVPacket pkt);
/**
 * VBV delay coded in the last frame (in periods of a 27 MHz clock).
 * Used for compliant TS muxing.
 * - encoding: Set by libavcodec.
 * - decoding: unused.
 */
public native long vbv_delay(); public native AVCodecContext vbv_delay(long vbv_delay);
/**
 * Current statistics for PTS correction.
 * - decoding: maintained and used by libavcodec, not intended to be used by user apps
 * - encoding: unused
 */
public native long pts_correction_num_faulty_pts(); /// Number of incorrect PTS values so far
public native AVCodecContext pts_correction_num_faulty_pts(long pts_correction_num_faulty_pts);
public native long pts_correction_num_faulty_dts(); /// Number of incorrect DTS values so far
public native AVCodecContext pts_correction_num_faulty_dts(long pts_correction_num_faulty_dts);
public native long pts_correction_last_pts(); /// PTS of the last frame
public native AVCodecContext pts_correction_last_pts(long pts_correction_last_pts);
public native long pts_correction_last_dts(); /// DTS of the last frame
public native AVCodecContext pts_correction_last_dts(long pts_correction_last_dts);
}
/**
 * AVProfile.
 * Binding for the C struct AVProfile: a (profile id, short name) pair used in
 * AVCodec.profiles arrays, which are terminated by {FF_PROFILE_UNKNOWN}.
 */
public static class AVProfile extends Pointer {
static { load(); }
public AVProfile() { allocate(); }
public AVProfile(int size) { allocateArray(size); }
public AVProfile(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(int size);
// Covariant override so position() can be chained without a cast.
@Override public AVProfile position(int position) {
return (AVProfile)super.position(position);
}
public native int profile(); public native AVProfile profile(int profile);
@Cast("const char*") ///< short name for the profile
public native BytePointer name(); public native AVProfile name(BytePointer name);
}
// Opaque binding: the AVCodecDefault struct layout is private to libavcodec,
// so only pointers to it are passed around, never its fields.
@Opaque public static class AVCodecDefault extends Pointer {
public AVCodecDefault() { }
public AVCodecDefault(Pointer p) { super(p); }
}
/**
 * AVCodec.
 * Binding for the C struct AVCodec describing a single encoder or decoder
 * implementation registered with libavcodec.
 */
public static class AVCodec extends Pointer {
static { load(); }
public AVCodec() { allocate(); }
public AVCodec(int size) { allocateArray(size); }
public AVCodec(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(int size);
// Covariant override so position() can be chained without a cast.
@Override public AVCodec position(int position) {
return (AVCodec)super.position(position);
}
/**
 * Name of the codec implementation.
 * The name is globally unique among encoders and among decoders (but an
 * encoder and a decoder can share the same name).
 * This is the primary way to find a codec from the user perspective.
 */
@Cast("const char*")
public native BytePointer name(); public native AVCodec name(BytePointer name);
/**
 * Descriptive name for the codec, meant to be more human readable than name.
 * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
 */
@Cast("const char*")
public native BytePointer long_name(); public native AVCodec long_name(BytePointer long_name);
@Cast("AVMediaType")
public native int type(); public native AVCodec type(int type);
@Cast("CodecID")
public native int id(); public native AVCodec id(int id);
/**
 * Codec capabilities.
 * see CODEC_CAP_*
 */
public native int capabilities(); public native AVCodec capabilities(int capabilities);
@Const ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
public native AVRational supported_framerates(); public native AVCodec supported_framerates(AVRational supported_framerates);
@Cast("const PixelFormat*") ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
public native IntPointer pix_fmts(); public native AVCodec pix_fmts(IntPointer pix_fmts);
@Const ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
public native IntPointer supported_samplerates(); public native AVCodec supported_samplerates(IntPointer supported_samplerates);
@Cast("const AVSampleFormat*") ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
public native IntPointer sample_fmts(); public native AVCodec sample_fmts(IntPointer sample_fmts);
@Cast("const uint64_t*") ///< array of supported channel layouts, or NULL if unknown; array is terminated by 0
public native LongPointer channel_layouts(); public native AVCodec channel_layouts(LongPointer channel_layouts);
@Cast("uint8_t") ///< maximum value for lowres supported by the decoder
public native int max_lowres(); public native AVCodec max_lowres(int max_lowres);
@Const ///< AVClass for the private context
public native AVClass priv_class(); public native AVCodec priv_class(AVClass priv_class);
@Const ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
public native AVProfile profiles(); public native AVCodec profiles(AVProfile profiles);
}
/**
 * AVHWAccel.
 * Binding for the C struct AVHWAccel describing a hardware-accelerated
 * decoder hookup (start_frame/decode_slice/end_frame callbacks).
 */
public static class AVHWAccel extends Pointer {
static { load(); }
public AVHWAccel() { allocate(); }
public AVHWAccel(int size) { allocateArray(size); }
public AVHWAccel(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(int size);
// Covariant override so position() can be chained without a cast.
@Override public AVHWAccel position(int position) {
return (AVHWAccel)super.position(position);
}
/**
 * Name of the hardware accelerated codec.
 * The name is globally unique among encoders and among decoders (but an
 * encoder and a decoder can share the same name).
 */
@Cast("const char *")
public native BytePointer name(); public native AVHWAccel name(BytePointer name);
/**
 * Type of codec implemented by the hardware accelerator.
 *
 * See AVMEDIA_TYPE_xxx
 */
@Cast("AVMediaType")
public native int type(); public native AVHWAccel type(int type);
/**
 * Codec implemented by the hardware accelerator.
 *
 * See CODEC_ID_xxx
 */
@Cast("CodecID")
public native int id(); public native AVHWAccel id(int id);
/**
 * Supported pixel format.
 *
 * Only hardware accelerated formats are supported here.
 */
@Cast("PixelFormat")
public native int pix_fmt(); public native AVHWAccel pix_fmt(int pix_fmt);
/**
 * Hardware accelerated codec capabilities.
 * see FF_HWACCEL_CODEC_CAP_*
 */
public native int capabilities(); public native AVHWAccel capabilities(int capabilities);
// Next accelerator in libavcodec's internal linked list.
public native AVHWAccel next(); public native AVHWAccel next(AVHWAccel next);
/**
 * Called at the beginning of each frame or field picture.
 *
 * Meaningful frame information (codec specific) is guaranteed to
 * be parsed at this point. This function is mandatory.
 *
 * Note that buf can be NULL along with buf_size set to 0.
 * Otherwise, this means the whole frame is available at this point.
 *
 * @param avctx the codec context
 * @param buf the frame data buffer base
 * @param buf_size the size of the frame in bytes
 * @return zero if successful, a negative value otherwise
 */
public static class Start_frame extends FunctionPointer {
static { load(); }
public Start_frame(Pointer p) { super(p); }
protected Start_frame() { allocate(); }
protected final native void allocate();
public native int call(AVCodecContext avctx, @Cast("const uint8_t*") BytePointer buf, @Cast("uint32_t") int buf_size);
}
public native Start_frame start_frame(); public native AVHWAccel start_frame(Start_frame start_frame);
/**
 * Callback for each slice.
 *
 * Meaningful slice information (codec specific) is guaranteed to
 * be parsed at this point. This function is mandatory.
 *
 * @param avctx the codec context
 * @param buf the slice data buffer base
 * @param buf_size the size of the slice in bytes
 * @return zero if successful, a negative value otherwise
 */
public static class Decode_slice extends FunctionPointer {
static { load(); }
public Decode_slice(Pointer p) { super(p); }
protected Decode_slice() { allocate(); }
protected final native void allocate();
public native int call(AVCodecContext avctx, @Cast("const uint8_t*") BytePointer buf, @Cast("uint32_t") int buf_size);
}
public native Decode_slice decode_slice(); public native AVHWAccel decode_slice(Decode_slice decode_slice);
/**
 * Called at the end of each frame or field picture.
 *
 * The whole picture is parsed at this point and can now be sent
 * to the hardware accelerator. This function is mandatory.
 *
 * @param avctx the codec context
 * @return zero if successful, a negative value otherwise
 */
public static class End_frame extends FunctionPointer {
static { load(); }
public End_frame(Pointer p) { super(p); }
protected End_frame() { allocate(); }
protected final native void allocate();
public native int call(AVCodecContext avctx);
}
public native End_frame end_frame(); public native AVHWAccel end_frame(End_frame end_frame);
/**
 * Size of HW accelerator private data.
 *
 * Private data is allocated with av_mallocz() before
 * AVCodecContext.get_buffer() and deallocated after
 * AVCodecContext.release_buffer().
 */
public native int priv_data_size(); public native AVHWAccel priv_data_size(int priv_data_size);
}
/**
* @defgroup lavc_picture AVPicture
*
* Functions for working with AVPicture
* @{
*/
/**
 * four components are given, that's all.
 * the last component is alpha
 * Binding for the C struct AVPicture: raw image plane pointers plus
 * per-plane line sizes.
 */
public static class AVPicture extends Pointer {
static { load(); }
public AVPicture() { allocate(); }
public AVPicture(int size) { allocateArray(size); }
public AVPicture(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(int size);
// Covariant override so position() can be chained without a cast.
@Override public AVPicture position(int position) {
return (AVPicture)super.position(position);
}
@Cast("uint8_t*") // uint8_t *data[AV_NUM_DATA_POINTERS];
public native BytePointer data(int i); public native AVPicture data(int i, BytePointer data);
// int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line
public native int linesize(int i); public native AVPicture linesize(int i, int linesize);
// Getter-only view of the whole linesize[] array (no matching setter).
@MemberGetter public native IntPointer linesize();
}
/**
* @}
*/
// Values of the C enum AVSubtitleType, stored in AVSubtitleRect.type().
public static final int // enum AVSubtitleType {
SUBTITLE_NONE = 0,
SUBTITLE_BITMAP = 1, ///< A bitmap, pict will be set
/**
 * Plain text, the text field must be set by the decoder and is
 * authoritative. ass and pict fields may contain approximations.
 */
SUBTITLE_TEXT = 2,
/**
 * Formatted text, the ass field must be set by the decoder and is
 * authoritative. pict and text fields may contain approximations.
 */
SUBTITLE_ASS = 3;
// Binding for the C struct AVSubtitleRect: one rectangle of a decoded
// subtitle, either a bitmap (pict) or text/ASS content depending on type().
public static class AVSubtitleRect extends Pointer {
static { load(); }
public AVSubtitleRect() { allocate(); }
public AVSubtitleRect(int size) { allocateArray(size); }
public AVSubtitleRect(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(int size);
// Covariant override so position() can be chained without a cast.
@Override public AVSubtitleRect position(int position) {
return (AVSubtitleRect)super.position(position);
}
public native int x(); public native AVSubtitleRect x(int x); ///< top left corner of pict, undefined when pict is not set
public native int y(); public native AVSubtitleRect y(int y); ///< top left corner of pict, undefined when pict is not set
public native int w(); public native AVSubtitleRect w(int w); ///< width of pict, undefined when pict is not set
public native int h(); public native AVSubtitleRect h(int h); ///< height of pict, undefined when pict is not set
public native int nb_colors(); public native AVSubtitleRect nb_colors(int nb_colors); ///< number of colors in pict, undefined when pict is not set
/**
 * data+linesize for the bitmap of this subtitle.
 * can be set for text/ass as well once they were rendered
 */
@ByRef
public native AVPicture pict(); public native AVSubtitleRect pict(AVPicture pict);
@Cast("AVSubtitleType")
public native int type(); public native AVSubtitleRect type(int type);
@Cast("char *") ///< 0 terminated plain UTF-8 text
public native BytePointer text(); public native AVSubtitleRect text(BytePointer text);
/**
 * 0 terminated ASS/SSA compatible event line.
 * The presentation of this is unaffected by the other values in this
 * struct.
 */
@Cast("char *")
public native BytePointer ass(); public native AVSubtitleRect ass(BytePointer ass);
/**
 * 1 indicates this subtitle is a forced subtitle.
 * A forced subtitle should be displayed even when subtitles are hidden.
 */
public native int forced(); public native AVSubtitleRect forced(int forced);
}
// Binding for the C struct AVSubtitle: a decoded subtitle event made of
// num_rects() AVSubtitleRect entries plus display timing.
public static class AVSubtitle extends Pointer {
static { load(); }
public AVSubtitle() { allocate(); }
public AVSubtitle(int size) { allocateArray(size); }
public AVSubtitle(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(int size);
// Covariant override so position() can be chained without a cast.
@Override public AVSubtitle position(int position) {
return (AVSubtitle)super.position(position);
}
public native short format(); public native AVSubtitle format(short format); /* 0 = graphics */
public native int start_display_time(); public native AVSubtitle start_display_time(int start_display_time); /* relative to packet pts, in ms */
public native int end_display_time(); public native AVSubtitle end_display_time(int end_display_time); /* relative to packet pts, in ms */
public native int num_rects(); public native AVSubtitle num_rects(int num_rects);
@Cast("AVSubtitleRect**")
public native PointerPointer rects(); public native AVSubtitle rects(PointerPointer rects);
public native long pts(); public native AVSubtitle pts(long pts); ///< Same as packet pts, in AV_TIME_BASE
}
/**
 * If c is NULL, returns the first registered codec,
 * if c is non-NULL, returns the next registered codec after c,
 * or NULL if c is the last one.
 */
public static native AVCodec av_codec_next(AVCodec c);
/**
 * Return the LIBAVCODEC_VERSION_INT constant.
 */
public static native @Cast("unsigned") int avcodec_version();
/**
 * Return the libavcodec build-time configuration.
 */
public static native String avcodec_configuration();
/**
 * Return the libavcodec license.
 */
public static native String avcodec_license();
/**
 * Register the codec codec and initialize libavcodec.
 *
 * @warning either this function or avcodec_register_all() must be called
 * before any other libavcodec functions.
 *
 * @see avcodec_register_all()
 */
public static native void avcodec_register(AVCodec codec);
/**
 * Register all the codecs, parsers and bitstream filters which were enabled at
 * configuration time. If you do not call this function you can select exactly
 * which formats you want to support, by using the individual registration
 * functions.
 *
 * @see avcodec_register
 * @see av_register_codec_parser
 * @see av_register_bitstream_filter
 */
public static native void avcodec_register_all();
/**
 * Allocate an AVCodecContext and set its fields to default values. The
 * resulting struct can be deallocated by calling avcodec_close() on it followed
 * by av_free().
 *
 * @param codec if non-NULL, allocate private data and initialize defaults
 * for the given codec. It is illegal to then call avcodec_open2()
 * with a different codec.
 * If NULL, then the codec-specific defaults won't be initialized,
 * which may result in suboptimal default settings (this is
 * important mainly for encoders, e.g. libx264).
 *
 * @return An AVCodecContext filled with default values or NULL on failure.
 * @see avcodec_get_context_defaults
 */
public static native AVCodecContext avcodec_alloc_context3(AVCodec codec);
/**
 * Set the fields of the given AVCodecContext to default values corresponding
 * to the given codec (defaults may be codec-dependent).
 *
 * Do not call this function if a non-NULL codec has been passed
 * to avcodec_alloc_context3() that allocated this AVCodecContext.
 * If codec is non-NULL, it is illegal to call avcodec_open2() with a
 * different codec on this AVCodecContext.
 */
public static native int avcodec_get_context_defaults3(AVCodecContext s, AVCodec codec);
/**
 * Get the AVClass for AVCodecContext. It can be used in combination with
 * AV_OPT_SEARCH_FAKE_OBJ for examining options.
 *
 * @see av_opt_find().
 */
public static native @Const AVClass avcodec_get_class();
/**
 * Get the AVClass for AVFrame. It can be used in combination with
 * AV_OPT_SEARCH_FAKE_OBJ for examining options.
 *
 * @see av_opt_find().
 */
public static native @Const AVClass avcodec_get_frame_class();
/**
 * Get the AVClass for AVSubtitleRect. It can be used in combination with
 * AV_OPT_SEARCH_FAKE_OBJ for examining options.
 *
 * @see av_opt_find().
 */
public static native @Const AVClass avcodec_get_subtitle_rect_class();
/**
 * Copy the settings of the source AVCodecContext into the destination
 * AVCodecContext. The resulting destination codec context will be
 * unopened, i.e. you are required to call avcodec_open2() before you
 * can use this AVCodecContext to decode/encode video/audio data.
 *
 * @param dest target codec context, should be initialized with
 * avcodec_alloc_context3(), but otherwise uninitialized
 * @param src source codec context
 * @return AVERROR() on error (e.g. memory allocation error), 0 on success
 */
public static native int avcodec_copy_context(AVCodecContext dest, AVCodecContext src);
/**
 * Allocate an AVFrame and set its fields to default values. The resulting
 * struct can be deallocated by simply calling av_free().
 *
 * @return An AVFrame filled with default values or NULL on failure.
 * @see avcodec_get_frame_defaults
 */
public static native AVFrame avcodec_alloc_frame();
/**
 * Set the fields of the given AVFrame to default values.
 *
 * @param pic The AVFrame of which the fields should be set to default values.
 */
public static native void avcodec_get_frame_defaults(AVFrame pic);
/**
 * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
 * function the context has to be allocated with avcodec_alloc_context3().
 *
 * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
 * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
 * retrieving a codec.
 *
 * @warning This function is not thread safe!
 *
 * @code
 * avcodec_register_all();
 * av_dict_set(&opts, "b", "2.5M", 0);
 * codec = avcodec_find_decoder(CODEC_ID_H264);
 * if (!codec)
 * exit(1);
 *
 * context = avcodec_alloc_context3(codec);
 *
 * if (avcodec_open2(context, codec, opts) < 0)
 * exit(1);
 * @endcode
 *
 * @param avctx The context to initialize.
 * @param codec The codec to open this context for. If a non-NULL codec has been
 * previously passed to avcodec_alloc_context3() or
 * avcodec_get_context_defaults3() for this context, then this
 * parameter MUST be either NULL or equal to the previously passed
 * codec.
 * @param options A dictionary filled with AVCodecContext and codec-private options.
 * On return this object will be filled with options that were not found.
 *
 * @return zero on success, a negative value on error
 * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(),
 * av_dict_set(), av_opt_find().
 */
public static native int avcodec_open2(AVCodecContext avctx, AVCodec codec, @ByPtrPtr AVDictionary options);
/**
 * Close a given AVCodecContext and free all the data associated with it
 * (but not the AVCodecContext itself).
 *
 * Calling this function on an AVCodecContext that hasn't been opened will free
 * the codec-specific data allocated in avcodec_alloc_context3() /
 * avcodec_get_context_defaults3() with a non-NULL codec. Subsequent calls will
 * do nothing.
 */
public static native int avcodec_close(AVCodecContext avctx);
/**
 * Free all allocated data in the given subtitle struct.
 *
 * @param sub AVSubtitle to free.
 */
public static native void avsubtitle_free(AVSubtitle sub);
/**
* @}
*/
/**
* @addtogroup lavc_packet
* @{
*/
/**
 * Default packet destructor.
 */
public static native void av_destruct_packet(AVPacket pkt);
/**
 * Initialize optional fields of a packet with default values.
 *
 * @param pkt packet
 */
public static native void av_init_packet(AVPacket pkt);
/**
 * Allocate the payload of a packet and initialize its fields with
 * default values.
 *
 * @param pkt packet
 * @param size wanted payload size
 * @return 0 if OK, AVERROR_xxx otherwise
 */
public static native int av_new_packet(AVPacket pkt, int size);
/**
 * Reduce packet size, correctly zeroing padding
 *
 * @param pkt packet
 * @param size new size
 */
public static native void av_shrink_packet(AVPacket pkt, int size);
/**
 * Increase packet size, correctly zeroing padding
 *
 * @param pkt packet
 * @param grow_by number of bytes by which to increase the size of the packet
 */
public static native int av_grow_packet(AVPacket pkt, int grow_by);
/**
 * @warning This is a hack - the packet memory allocation stuff is broken. The
 * packet is allocated if it was not really allocated.
 */
public static native int av_dup_packet(AVPacket pkt);
/**
 * Free a packet.
 *
 * @param pkt packet to free
 */
public static native void av_free_packet(AVPacket pkt);
/**
 * Allocate new information of a packet.
 *
 * @param pkt packet
 * @param type side information type
 * @param size side information size
 * @return pointer to fresh allocated data or NULL otherwise
 */
public static native @Cast("uint8_t*") BytePointer av_packet_new_side_data(AVPacket pkt,
@Cast("AVPacketSideDataType") int type, int size);
/**
 * Shrink the already allocated side data buffer
 *
 * @param pkt packet
 * @param type side information type
 * @param size new side information size
 * @return 0 on success, < 0 on failure
 */
public static native int av_packet_shrink_side_data(AVPacket pkt,
@Cast("AVPacketSideDataType") int type, int size);
/**
 * Get side information from packet.
 *
 * @param pkt packet
 * @param type desired side information type
 * @param size pointer for side information size to store (optional)
 * @return pointer to data if present or NULL otherwise
 */
public static native @Cast("uint8_t*") BytePointer av_packet_get_side_data(AVPacket pkt,
@Cast("AVPacketSideDataType") int type, int[] size);
// Merge all side data of pkt into its main data buffer.
public static native int av_packet_merge_side_data(AVPacket pkt);
// Split pkt's merged side data back out of its main data buffer.
public static native int av_packet_split_side_data(AVPacket pkt);
/**
* @}
*/
/**
* @addtogroup lavc_decoding
* @{
*/
/**
* Find a registered decoder with a matching codec ID.
*
* @param id CodecID of the requested decoder
* @return A decoder if one was found, NULL otherwise.
*/
public static native AVCodec avcodec_find_decoder(@Cast("CodecID") int id);
/**
* Find a registered decoder with the specified name.
*
* @param name name of the requested decoder
* @return A decoder if one was found, NULL otherwise.
*/
public static native AVCodec avcodec_find_decoder_by_name(String name);
public static native int avcodec_default_get_buffer(AVCodecContext s, AVFrame pic);
public static native void avcodec_default_release_buffer(AVCodecContext s, AVFrame pic);
public static native int avcodec_default_reget_buffer(AVCodecContext s, AVFrame pic);
/**
* Return the amount of padding in pixels which the get_buffer callback must
* provide around the edge of the image for codecs which do not have the
* CODEC_FLAG_EMU_EDGE flag.
*
* @return Required padding in pixels.
*/
public static native @Cast("unsigned") int avcodec_get_edge_width();
/**
* Modify width and height values so that they will result in a memory
* buffer that is acceptable for the codec if you do not use any horizontal
* padding.
*
* May only be used if a codec with CODEC_CAP_DR1 has been opened.
* If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased
* according to avcodec_get_edge_width() before.
*/
public static native void avcodec_align_dimensions(AVCodecContext s, int[] width, int[] height);
/**
 * Modify width and height values so that they will result in a memory
 * buffer that is acceptable for the codec if you also ensure that all
 * line sizes are a multiple of the respective linesize_align[i].
 *
 * May only be used if a codec with CODEC_CAP_DR1 has been opened.
 * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased
 * according to avcodec_get_edge_width() before.
 */
// linesize_align must have at least AV_NUM_DATA_POINTERS elements.
public static native void avcodec_align_dimensions2(AVCodecContext s, int[] width, int[] height,
        int linesize_align[/*AV_NUM_DATA_POINTERS*/]);
/**
 * Decode the audio frame of size avpkt->size from avpkt->data into frame.
 *
 * Some decoders may support multiple frames in a single AVPacket. Such
 * decoders would then just decode the first frame. In this case,
 * avcodec_decode_audio4 has to be called again with an AVPacket containing
 * the remaining data in order to decode the second frame, etc...
 * Even if no frames are returned, the packet needs to be fed to the decoder
 * with remaining data until it is completely consumed or an error occurs.
 *
 * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE
 *          larger than the actual read bytes because some optimized bitstream
 *          readers read 32 or 64 bits at once and could read over the end.
 *
 * @note You might have to align the input buffer. The alignment requirements
 *       depend on the CPU and the decoder.
 *
 * @param avctx the codec context
 * @param[out] frame The AVFrame in which to store decoded audio samples.
 *                   Decoders request a buffer of a particular size by setting
 *                   AVFrame.nb_samples prior to calling get_buffer(). The
 *                   decoder may, however, only utilize part of the buffer by
 *                   setting AVFrame.nb_samples to a smaller value in the
 *                   output frame.
 * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is
 *                           non-zero.
 * @param[in] avpkt The input AVPacket containing the input buffer.
 *                  At least avpkt->data and avpkt->size should be set. Some
 *                  decoders might also require additional fields to be set.
 * @return A negative error code is returned if an error occurred during
 *         decoding, otherwise the number of bytes consumed from the input
 *         AVPacket is returned.
 */
// got_frame_ptr maps to a native int*: pass an int[1]; element 0 receives the flag.
public static native int avcodec_decode_audio4(AVCodecContext avctx, AVFrame frame,
        int[] got_frame_ptr, AVPacket avpkt);
/**
 * Decode the video frame of size avpkt->size from avpkt->data into picture.
 * Some decoders may support multiple frames in a single AVPacket, such
 * decoders would then just decode the first frame.
 *
 * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than
 * the actual read bytes because some optimized bitstream readers read 32 or 64
 * bits at once and could read over the end.
 *
 * @warning The end of the input buffer buf should be set to 0 to ensure that
 * no overreading happens for damaged MPEG streams.
 *
 * @note You might have to align the input buffer avpkt->data.
 * The alignment requirements depend on the CPU: on some CPUs it isn't
 * necessary at all, on others it won't work at all if not aligned and on others
 * it will work but it will have an impact on performance.
 *
 * In practice, avpkt->data should have 4 byte alignment at minimum.
 *
 * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay
 * between input and output, these need to be fed with avpkt->data=NULL,
 * avpkt->size=0 at the end to return the remaining frames.
 *
 * @param avctx the codec context
 * @param[out] picture The AVFrame in which the decoded video frame will be stored.
 *             Use avcodec_alloc_frame to get an AVFrame, the codec will
 *             allocate memory for the actual bitmap.
 *             with default get/release_buffer(), the decoder frees/reuses the bitmap as it sees fit.
 *             with overridden get/release_buffer() (needs CODEC_CAP_DR1) the user decides into what buffer the decoder
 *                   decodes and the decoder tells the user once it does not need the data anymore,
 *                   the user app can at this point free/reuse/keep the memory as it sees fit.
 *
 * @param[in] avpkt The input AVPacket containing the input buffer.
 *            You can create such packet with av_init_packet() and by then setting
 *            data and size, some decoders might in addition need other fields like
 *            flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least
 *            fields possible.
 * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero.
 * @return On error a negative value is returned, otherwise the number of bytes
 *         used or zero if no frame could be decompressed.
 */
// got_picture_ptr maps to a native int*: pass an int[1]; element 0 receives the flag.
public static native int avcodec_decode_video2(AVCodecContext avctx, AVFrame picture,
        int[] got_picture_ptr, AVPacket avpkt);
/**
 * Decode a subtitle message.
 * Return a negative value on error, otherwise return the number of bytes used.
 * If no subtitle could be decompressed, got_sub_ptr is zero.
 * Otherwise, the subtitle is stored in *sub.
 * Note that CODEC_CAP_DR1 is not available for subtitle codecs. This is for
 * simplicity, because the performance difference is expected to be negligible
 * and reusing a get_buffer written for video codecs would probably perform badly
 * due to a potentially very different allocation pattern.
 *
 * @param avctx the codec context
 * @param[out] sub The AVSubtitle in which the decoded subtitle will be stored, must be
                   freed with avsubtitle_free if *got_sub_ptr is set.
 * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero.
 * @param[in] avpkt The input AVPacket containing the input buffer.
 */
// got_sub_ptr maps to a native int*: pass an int[1]; element 0 receives the flag.
public static native int avcodec_decode_subtitle2(AVCodecContext avctx, AVSubtitle sub,
        int[] got_sub_ptr, AVPacket avpkt);
/**
* @defgroup lavc_parsing Frame parsing
* @{
*/
/**
 * JavaCPP mapping of libavcodec's {@code AVCodecParserContext}: per-stream parser
 * state used by av_parser_parse2() to split a raw byte stream into frames and to
 * generate/propagate timestamps. Each accessor pair maps a native struct field;
 * the setter returns {@code this} for chaining.
 */
public static class AVCodecParserContext extends Pointer {
    static { load(); } // ensure the native library is loaded before use
    /** Allocates a new native AVCodecParserContext. */
    public AVCodecParserContext() { allocate(); }
    /** Allocates a native array of {@code size} contexts. */
    public AVCodecParserContext(int size) { allocateArray(size); }
    /** Wraps an existing native pointer without allocating. */
    public AVCodecParserContext(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(int size);
    @Override public AVCodecParserContext position(int position) {
        return (AVCodecParserContext)super.position(position);
    }
    public native Pointer priv_data(); public native AVCodecParserContext priv_data(Pointer priv_data);
    public native AVCodecParser parser(); public native AVCodecParserContext parser(AVCodecParser parser);
    /* offset of the current frame */
    public native long frame_offset(); public native AVCodecParserContext frame_offset(long frame_offset);
    /* current offset (incremented by each av_parser_parse()) */
    public native long cur_offset(); public native AVCodecParserContext cur_offset(long cur_offset);
    /* offset of the next frame */
    public native long next_frame_offset(); public native AVCodecParserContext next_frame_offset(long next_frame_offset);
    /* video info */ /* XXX: Put it back in AVCodecContext. */
    public native int pict_type(); public native AVCodecParserContext pict_type(int pict_type);
    /**
     * This field is used for proper frame duration computation in lavf.
     * It signals, how much longer the frame duration of the current frame
     * is compared to normal frame duration.
     *
     * frame_duration = (1 + repeat_pict) * time_base
     *
     * It is used by codecs like H.264 to display telecined material.
     */
    public native int repeat_pict(); public native AVCodecParserContext repeat_pict(int repeat_pict); /* XXX: Put it back in AVCodecContext. */
    public native long pts(); public native AVCodecParserContext pts(long pts); /* pts of the current frame */
    public native long dts(); public native AVCodecParserContext dts(long dts); /* dts of the current frame */
    /* private data */
    public native long last_pts(); public native AVCodecParserContext last_pts(long last_pts);
    public native long last_dts(); public native AVCodecParserContext last_dts(long last_dts);
    public native int fetch_timestamp(); public native AVCodecParserContext fetch_timestamp(int fetch_timestamp);
    /** Size of the native cur_frame_* ring buffers below. */
    public static final int AV_PARSER_PTS_NB = 4;
    public native int cur_frame_start_index(); public native AVCodecParserContext cur_frame_start_index(int cur_frame_start_index);
    // Indexed accessors: i must be in [0, AV_PARSER_PTS_NB).
    public native long cur_frame_offset(int i); public native AVCodecParserContext cur_frame_offset(int i, long cur_frame_offset);
    public native long cur_frame_pts(int i); public native AVCodecParserContext cur_frame_pts(int i, long cur_frame_pts);
    public native long cur_frame_dts(int i); public native AVCodecParserContext cur_frame_dts(int i, long cur_frame_dts);
    public native int flags(); public native AVCodecParserContext flags(int flags);
    public static final int
            PARSER_FLAG_COMPLETE_FRAMES = 0x0001,
            PARSER_FLAG_ONCE            = 0x0002,
            /// Set if the parser has a valid file offset
            PARSER_FLAG_FETCHED_OFFSET  = 0x0004,
            PARSER_FLAG_USE_CODEC_TS    = 0x1000;
    public native long offset(); public native AVCodecParserContext offset(long offset); ///< byte offset from starting packet start
    public native long cur_frame_end(int i); public native AVCodecParserContext cur_frame_end(int i, long cur_frame_end);
    /**
     * Set by parser to 1 for key frames and 0 for non-key frames.
     * It is initialized to -1, so if the parser doesn't set this flag,
     * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames
     * will be used.
     */
    public native int key_frame(); public native AVCodecParserContext key_frame(int key_frame);
    /**
     * Time difference in stream time base units from the pts of this
     * packet to the point at which the output from the decoder has converged
     * independent from the availability of previous frames. That is, the
     * frames are virtually identical no matter if decoding started from
     * the very first frame or from this keyframe.
     * Is AV_NOPTS_VALUE if unknown.
     * This field is not the display duration of the current frame.
     * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY
     * set.
     *
     * The purpose of this field is to allow seeking in streams that have no
     * keyframes in the conventional sense. It corresponds to the
     * recovery point SEI in H.264 and match_time_delta in NUT. It is also
     * essential for some types of subtitle streams to ensure that all
     * subtitles are correctly displayed after seeking.
     */
    public native long convergence_duration(); public native AVCodecParserContext convergence_duration(long convergence_duration);
    // Timestamp generation support:
    /**
     * Synchronization point for start of timestamp generation.
     *
     * Set to >0 for sync point, 0 for no sync point and <0 for undefined
     * (default).
     *
     * For example, this corresponds to presence of H.264 buffering period
     * SEI message.
     */
    public native int dts_sync_point(); public native AVCodecParserContext dts_sync_point(int dts_sync_point);
    /**
     * Offset of the current timestamp against last timestamp sync point in
     * units of AVCodecContext.time_base.
     *
     * Set to INT_MIN when dts_sync_point unused. Otherwise, it must
     * contain a valid timestamp offset.
     *
     * Note that the timestamp of sync point has usually a nonzero
     * dts_ref_dts_delta, which refers to the previous sync point. Offset of
     * the next frame after timestamp sync point will be usually 1.
     *
     * For example, this corresponds to H.264 cpb_removal_delay.
     */
    public native int dts_ref_dts_delta(); public native AVCodecParserContext dts_ref_dts_delta(int dts_ref_dts_delta);
    /**
     * Presentation delay of current frame in units of AVCodecContext.time_base.
     *
     * Set to INT_MIN when dts_sync_point unused. Otherwise, it must
     * contain valid non-negative timestamp delta (presentation time of a frame
     * must not lie in the past).
     *
     * This delay represents the difference between decoding and presentation
     * time of the frame.
     *
     * For example, this corresponds to H.264 dpb_output_delay.
     */
    public native int pts_dts_delta(); public native AVCodecParserContext pts_dts_delta(int pts_dts_delta);
    /**
     * Position of the packet in file.
     *
     * Analogous to cur_frame_pts/dts
     */
    // int64_t cur_frame_pos[AV_PARSER_PTS_NB];
    public native long cur_frame_pos(int i); public native AVCodecParserContext cur_frame_pos(int i, long cur_frame_pos);
    /**
     * Byte position of currently parsed frame in stream.
     */
    public native long pos(); public native AVCodecParserContext pos(long pos);
    /**
     * Previous frame byte position.
     */
    public native long last_pos(); public native AVCodecParserContext last_pos(long last_pos);
    /**
     * Duration of the current frame.
     * For audio, this is in units of 1 / AVCodecContext.sample_rate.
     * For all other types, this is in units of AVCodecContext.time_base.
     */
    public native int duration(); public native AVCodecParserContext duration(int duration);
}
/**
 * JavaCPP mapping of libavcodec's {@code AVCodecParser}: describes one registered
 * parser implementation (the codec IDs it handles plus its init/parse/close/split
 * callbacks). Instances form a linked list traversable via {@link #next()}.
 */
public static class AVCodecParser extends Pointer {
    static { load(); } // ensure the native library is loaded before use
    /** Allocates a new native AVCodecParser. */
    public AVCodecParser() { allocate(); }
    /** Allocates a native array of {@code size} parsers. */
    public AVCodecParser(int size) { allocateArray(size); }
    /** Wraps an existing native pointer without allocating. */
    public AVCodecParser(Pointer p) { super(p); }
    private native void allocate();
    private native void allocateArray(int size);
    @Override public AVCodecParser position(int position) {
        return (AVCodecParser)super.position(position);
    }
    public native int/*[5]*/ codec_ids(int i); public native AVCodecParser codec_ids(int i, int codec_id); /* several codec IDs are permitted */
    public native int priv_data_size(); public native AVCodecParser priv_data_size(int priv_data_size);
    /** Function pointer type for the parser's init callback. */
    public static class Parser_init extends FunctionPointer {
        static { load(); }
        public Parser_init(Pointer p) { super(p); }
        protected Parser_init() { allocate(); }
        protected final native void allocate();
        public native int call(AVCodecParserContext s);
    }
    public native Parser_init parser_init(); public native AVCodecParser parser_init(Parser_init parser_init);
    /** Function pointer type for the parser's parse callback. */
    public static class Parser_parse extends FunctionPointer {
        static { load(); }
        public Parser_parse(Pointer p) { super(p); }
        protected Parser_parse() { allocate(); }
        protected final native void allocate();
        public native int call(AVCodecParserContext s, AVCodecContext avctx,
                @Cast("const uint8_t**") PointerPointer poutbuf, IntPointer poutbuf_size,
                @Cast("const uint8_t*") BytePointer buf, int buf_size);
    }
    public native Parser_parse parser_parse(); public native AVCodecParser parser_parse(Parser_parse parser_parse);
    /** Function pointer type for the parser's close callback. */
    public static class Parser_close extends FunctionPointer {
        static { load(); }
        public Parser_close(Pointer p) { super(p); }
        protected Parser_close() { allocate(); }
        protected final native void allocate();
        public native void call(AVCodecParserContext s);
    }
    public native Parser_close parser_close(); public native AVCodecParser parser_close(Parser_close parser_close);
    /** Function pointer type for the parser's split callback. */
    public static class Split extends FunctionPointer {
        static { load(); }
        public Split(Pointer p) { super(p); }
        protected Split() { allocate(); }
        protected final native void allocate();
        public native int call(AVCodecContext avctx, @Cast("const uint8_t*") BytePointer buf, int buf_size);
    }
    public native Split split(); public native AVCodecParser split(Split split);
    public native AVCodecParser next(); public native AVCodecParser next(AVCodecParser next);
}
/** Iterate over the registered parsers; returns the parser following c in the list. */
public static native AVCodecParser av_parser_next(AVCodecParser c);
/** Register a parser so that av_parser_init() can find it. */
public static native void av_register_codec_parser(AVCodecParser parser);
/** Allocate and initialize a parser context for the given codec ID; NULL presumably means no parser exists — verify against libavcodec docs. */
public static native AVCodecParserContext av_parser_init(int codec_id);
/**
 * Parse a packet.
 *
 * @param s parser context.
 * @param avctx codec context.
 * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished.
 * @param poutbuf_size set to size of parsed buffer or zero if not yet finished.
 * @param buf input buffer.
 * @param buf_size input length, to signal EOF, this should be 0 (so that the last frame can be output).
 * @param pts input presentation timestamp.
 * @param dts input decoding timestamp.
 * @param pos input byte position in stream.
 * @return the number of bytes of the input bitstream used.
 *
 * Example:
 * @code
 *   while(in_len){
 *       len = av_parser_parse2(myparser, AVCodecContext, &data, &size,
 *                                        in_data, in_len,
 *                                        pts, dts, pos);
 *       in_data += len;
 *       in_len  -= len;
 *
 *       if(size)
 *          decode_frame(data, size);
 *   }
 * @endcode
 */
// poutbuf is an out-parameter (uint8_t**); poutbuf_size must be an int[1].
public static native int av_parser_parse2(AVCodecParserContext s,
        AVCodecContext avctx, @Cast("uint8_t**") PointerPointer poutbuf, int[] poutbuf_size,
        @Cast("uint8_t*") BytePointer buf, int buf_size, long pts, long dts, long pos);
/**
 * Transform the input buffer for output, writing the result through
 * poutbuf/poutbuf_size (same out-parameter convention as av_parser_parse2()).
 * NOTE(review): exact filtering semantics are defined by libavcodec's
 * av_parser_change() — consult avcodec.h before relying on them.
 */
public static native int av_parser_change(AVCodecParserContext s,
        AVCodecContext avctx, @Cast("uint8_t**") PointerPointer poutbuf, int[] poutbuf_size,
        @Cast("uint8_t*") BytePointer buf, int buf_size, int keyframe);
/** Free the parser context previously created with av_parser_init(). */
public static native void av_parser_close(AVCodecParserContext s);
/**
* @}
* @}
*/
/**
* @addtogroup lavc_encoding
* @{
*/
/**
 * Find a registered encoder with a matching codec ID.
 *
 * @param id CodecID of the requested encoder (native enum passed as int via @Cast)
 * @return An encoder if one was found, NULL otherwise.
 */
public static native AVCodec avcodec_find_encoder(@Cast("CodecID") int id);
/**
 * Find a registered encoder with the specified name.
 *
 * @param name name of the requested encoder
 * @return An encoder if one was found, NULL otherwise.
 */
public static native AVCodec avcodec_find_encoder_by_name(String name);
/**
 * Encode a frame of audio.
 *
 * Takes input samples from frame and writes the next output packet, if
 * available, to avpkt. The output packet does not necessarily contain data for
 * the most recent frame, as encoders can delay, split, and combine input frames
 * internally as needed.
 *
 * @param avctx codec context
 * @param avpkt output AVPacket.
 *              The user can supply an output buffer by setting
 *              avpkt->data and avpkt->size prior to calling the
 *              function, but if the size of the user-provided data is not
 *              large enough, encoding will fail. All other AVPacket fields
 *              will be reset by the encoder using av_init_packet(). If
 *              avpkt->data is NULL, the encoder will allocate it.
 *              The encoder will set avpkt->size to the size of the
 *              output packet.
 *
 *              If this function fails or produces no output, avpkt will be
 *              freed using av_free_packet() (i.e. avpkt->destruct will be
 *              called to free the user supplied buffer).
 * @param[in] frame AVFrame containing the raw audio data to be encoded.
 *                  May be NULL when flushing an encoder that has the
 *                  CODEC_CAP_DELAY capability set.
 *                  If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
 *                  can have any number of samples.
 *                  If it is not set, frame->nb_samples must be equal to
 *                  avctx->frame_size for all frames except the last.
 *                  The final frame may be smaller than avctx->frame_size.
 * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
 *                            output packet is non-empty, and to 0 if it is
 *                            empty. If the function returns an error, the
 *                            packet can be assumed to be invalid, and the
 *                            value of got_packet_ptr is undefined and should
 *                            not be used.
 * @return 0 on success, negative error code on failure
 */
// got_packet_ptr maps to a native int*: pass an int[1]; element 0 receives the flag.
public static native int avcodec_encode_audio2(AVCodecContext avctx, AVPacket avpkt,
        AVFrame frame, int[] got_packet_ptr);
/**
 * Encode a frame of video.
 *
 * Takes input raw video data from frame and writes the next output packet, if
 * available, to avpkt. The output packet does not necessarily contain data for
 * the most recent frame, as encoders can delay and reorder input frames
 * internally as needed.
 *
 * @param avctx codec context
 * @param avpkt output AVPacket.
 *              The user can supply an output buffer by setting
 *              avpkt->data and avpkt->size prior to calling the
 *              function, but if the size of the user-provided data is not
 *              large enough, encoding will fail. All other AVPacket fields
 *              will be reset by the encoder using av_init_packet(). If
 *              avpkt->data is NULL, the encoder will allocate it.
 *              The encoder will set avpkt->size to the size of the
 *              output packet. The returned data (if any) belongs to the
 *              caller, he is responsible for freeing it.
 *
 *              If this function fails or produces no output, avpkt will be
 *              freed using av_free_packet() (i.e. avpkt->destruct will be
 *              called to free the user supplied buffer).
 * @param[in] frame AVFrame containing the raw video data to be encoded.
 *                  May be NULL when flushing an encoder that has the
 *                  CODEC_CAP_DELAY capability set.
 * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
 *                            output packet is non-empty, and to 0 if it is
 *                            empty. If the function returns an error, the
 *                            packet can be assumed to be invalid, and the
 *                            value of got_packet_ptr is undefined and should
 *                            not be used.
 * @return 0 on success, negative error code on failure
 */
// got_packet_ptr maps to a native int*: pass an int[1]; element 0 receives the flag.
public static native int avcodec_encode_video2(AVCodecContext avctx, AVPacket avpkt,
        AVFrame frame, int[] got_packet_ptr);
/** Encode a subtitle: writes the encoded form of sub into buf (at most buf_size bytes). */
public static native int avcodec_encode_subtitle(AVCodecContext avctx,
        @Cast("uint8_t*") BytePointer buf, int buf_size, AVSubtitle sub);
/**
* @}
*/
/**
* @defgroup lavc_resample Audio resampling
* @ingroup libavc
*
* @{
*/
/** Opaque handle to a native ReSampleContext created by av_audio_resample_init(). */
@Opaque public static class ReSampleContext extends Pointer {
    public ReSampleContext() { }
    public ReSampleContext(Pointer p) { super(p); }
}
/** Opaque handle to a native AVResampleContext created by av_resample_init(). */
@Opaque public static class AVResampleContext extends Pointer {
    public AVResampleContext() { }
    public AVResampleContext(Pointer p) { super(p); }
}
/**
 * Initialize audio resampling context.
 *
 * @param output_channels number of output channels
 * @param input_channels number of input channels
 * @param output_rate output sample rate
 * @param input_rate input sample rate
 * @param sample_fmt_out requested output sample format
 * @param sample_fmt_in input sample format
 * @param filter_length length of each FIR filter in the filterbank relative to the cutoff frequency
 * @param log2_phase_count log2 of the number of entries in the polyphase filterbank
 * @param linear if 1 then the used FIR filter will be linearly interpolated
                 between the 2 closest, if 0 the closest will be used
 * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
 * @return allocated ReSampleContext, NULL if error occurred
 */
public static native ReSampleContext av_audio_resample_init(int output_channels, int input_channels,
        int output_rate, int input_rate, @Cast("AVSampleFormat") int sample_fmt_out, @Cast("AVSampleFormat") int sample_fmt_in,
        int filter_length, int log2_phase_count, int linear, double cutoff);
/* Resample nb_samples input samples into output using context s.
 * The overloads below expose the same native short* parameters through the
 * different Java buffer types supported by JavaCPP; the byte[]/Buffer/Pointer
 * variants are cast to short* on the native side. */
public static native int audio_resample(ReSampleContext s, short[] output, short[] input, int nb_samples);
public static native int audio_resample(ReSampleContext s, ShortBuffer output, ShortBuffer input, int nb_samples);
public static native int audio_resample(ReSampleContext s, ShortPointer output, ShortPointer input, int nb_samples);
public static native int audio_resample(ReSampleContext s, @Cast("short*") byte[] output, @Cast("short*") byte[] input, int nb_samples);
public static native int audio_resample(ReSampleContext s, @Cast("short*") Buffer output, @Cast("short*") Buffer input, int nb_samples);
public static native int audio_resample(ReSampleContext s, @Cast("short*") Pointer output, @Cast("short*") Pointer input, int nb_samples);
/**
 * Free resample context.
 *
 * @param s a non-NULL pointer to a resample context previously
 *          created with av_audio_resample_init()
 */
public static native void audio_resample_close(ReSampleContext s);
/**
 * Initialize an audio resampler.
 * Note, if either rate is not an integer then simply scale both rates up so they are.
 * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq
 * @param log2_phase_count log2 of the number of entries in the polyphase filterbank
 * @param linear If 1 then the used FIR filter will be linearly interpolated
                 between the 2 closest, if 0 the closest will be used
 * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
 */
public static native AVResampleContext av_resample_init(int out_rate, int in_rate,
        int filter_length, int log2_phase_count, int linear, double cutoff);
/**
 * Resample an array of samples using a previously configured context.
 * @param src an array of unconsumed samples
 * @param consumed the number of samples of src which have been consumed are returned here
 * @param src_size the number of unconsumed samples available
 * @param dst_size the amount of space in samples available in dst
 * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context.
 * @return the number of samples written in dst or -1 if an error occurred
 */
// consumed maps to a native int*: pass an int[1]. The overloads expose the same
// native short* parameters through the Java buffer types supported by JavaCPP.
public static native int av_resample(AVResampleContext c, short[] dst, short[] src,
        int[] consumed, int src_size, int dst_size, int update_ctx);
public static native int av_resample(AVResampleContext c, ShortBuffer dst, ShortBuffer src,
        int[] consumed, int src_size, int dst_size, int update_ctx);
public static native int av_resample(AVResampleContext c, ShortPointer dst, ShortPointer src,
        int[] consumed, int src_size, int dst_size, int update_ctx);
public static native int av_resample(AVResampleContext c, @Cast("short*") byte[] dst, @Cast("short*") byte[] src,
        int[] consumed, int src_size, int dst_size, int update_ctx);
public static native int av_resample(AVResampleContext c, @Cast("short*") Buffer dst, @Cast("short*") Buffer src,
        int[] consumed, int src_size, int dst_size, int update_ctx);
public static native int av_resample(AVResampleContext c, @Cast("short*") Pointer dst, @Cast("short*") Pointer src,
        int[] consumed, int src_size, int dst_size, int update_ctx);
/**
 * Compensate samplerate/timestamp drift. The compensation is done by changing
 * the resampler parameters, so no audible clicks or similar distortions occur
 * @param compensation_distance distance in output samples over which the compensation should be performed
 * @param sample_delta number of output samples which should be output less
 *
 * example: av_resample_compensate(c, 10, 500)
 * here instead of 510 samples only 500 samples would be output
 *
 * note, due to rounding the actual compensation might be slightly different,
 * especially if the compensation_distance is large and the in_rate used during init is small
 */
public static native void av_resample_compensate(AVResampleContext c, int sample_delta, int compensation_distance);
/** Free a resampler created with av_resample_init(). */
public static native void av_resample_close(AVResampleContext c);
/**
* @}
*/
/**
* @addtogroup lavc_picture
* @{
*/
/**
 * Allocate memory for a picture. Call avpicture_free() to free it.
 *
 * @see avpicture_fill()
 *
 * @param picture the picture to be filled in
 * @param pix_fmt the format of the picture
 * @param width the width of the picture
 * @param height the height of the picture
 * @return zero if successful, a negative value if not
 */
public static native int avpicture_alloc(AVPicture picture, @Cast("PixelFormat") int pix_fmt, int width, int height);
/**
 * Free a picture previously allocated by avpicture_alloc().
 * The data buffer used by the AVPicture is freed, but the AVPicture structure
 * itself is not.
 *
 * @param picture the AVPicture to be freed
 */
public static native void avpicture_free(AVPicture picture);
/**
 * Fill in the AVPicture fields.
 * The fields of the given AVPicture are filled in by using the 'ptr' address
 * which points to the image data buffer. Depending on the specified picture
 * format, one or multiple image data pointers and line sizes will be set.
 * If a planar format is specified, several pointers will be set pointing to
 * the different picture planes and the line sizes of the different planes
 * will be stored in the lines_sizes array.
 * Call with ptr == NULL to get the required size for the ptr buffer.
 *
 * To allocate the buffer and fill in the AVPicture fields in one call,
 * use avpicture_alloc().
 *
 * @param picture AVPicture whose fields are to be filled in
 * @param ptr Buffer which will contain or contains the actual image data
 * @param pix_fmt The format in which the picture data is stored.
 * @param width the width of the image in pixels
 * @param height the height of the image in pixels
 * @return size of the image data in bytes
 */
public static native int avpicture_fill(AVPicture picture, @Cast("uint8_t*") BytePointer ptr,
        @Cast("PixelFormat") int pix_fmt, int width, int height);
/**
 * Copy pixel data from an AVPicture into a buffer.
 * The data is stored compactly, without any gaps for alignment or padding
 * which may be applied by avpicture_fill().
 *
 * @see avpicture_get_size()
 *
 * @param[in] src AVPicture containing image data
 * @param[in] pix_fmt The format in which the picture data is stored.
 * @param[in] width the width of the image in pixels.
 * @param[in] height the height of the image in pixels.
 * @param[out] dest A buffer into which picture data will be copied.
 * @param[in] dest_size The size of 'dest'.
 * @return The number of bytes written to dest, or a negative value (error code) on error.
 */
public static native int avpicture_layout(AVPicture src, @Cast("PixelFormat") int pix_fmt,
        int width, int height, @Cast("unsigned char*") BytePointer dest, int dest_size);
/**
 * Calculate the size in bytes that a picture of the given width and height
 * would occupy if stored in the given picture format.
 * Note that this returns the size of a compact representation as generated
 * by avpicture_layout(), which can be smaller than the size required for e.g.
 * avpicture_fill().
 *
 * @param pix_fmt the given picture format
 * @param width the width of the image
 * @param height the height of the image
 * @return Image data size in bytes or -1 on error (e.g. too large dimensions).
 */
public static native int avpicture_get_size(@Cast("PixelFormat") int pix_fmt, int width, int height);
/**
 * Deinterlace src into dst; returns -1 if deinterlacing is not supported
 * for the given pixel format.
 */
public static native int avpicture_deinterlace(AVPicture dst, AVPicture src,
        @Cast("PixelFormat") int pix_fmt, int width, int height);
/**
 * Copy image src to dst. Wraps av_picture_data_copy() above.
 */
public static native void av_picture_copy(AVPicture dst, AVPicture src,
        @Cast("PixelFormat") int pix_fmt, int width, int height);
/**
 * Crop image top and left side.
 */
public static native int av_picture_crop(AVPicture dst, AVPicture src,
        @Cast("PixelFormat") int pix_fmt, int top_band, int left_band);
/**
 * Pad image.
 */
// color maps to a native int*; per-component fill values — see libavcodec av_picture_pad().
public static native int av_picture_pad(AVPicture dst, AVPicture src,
        int height, int width, @Cast("PixelFormat") int pix_fmt,
        int padtop, int padbottom, int padleft, int padright, int[] color);
/**
* @}
*/
/**
* @defgroup lavc_misc Utility functions
* @ingroup libavc
*
* Miscellaneous utility functions related to both encoding and decoding
* (or neither).
* @{
*/
/**
* @defgroup lavc_misc_pixfmt Pixel formats
*
* Functions for working with pixel formats.
* @{
*/
/**
 * Retrieve the chroma subsampling shifts of pix_fmt.
 * h_shift and v_shift map to native int*: pass int[1] arrays; element 0
 * receives the horizontal/vertical shift respectively.
 */
public static native void avcodec_get_chroma_sub_sample(@Cast("PixelFormat") int pix_fmt, int[] h_shift, int[] v_shift);
/**
 * Return a value representing the fourCC code associated to the
 * pixel format pix_fmt, or 0 if no associated fourCC code can be
 * found.
 */
public static native @Cast("unsigned") int avcodec_pix_fmt_to_codec_tag(@Cast("PixelFormat") int pix_fmt);
/* Bitmask flags combined in the return value of avcodec_get_pix_fmt_loss()
 * and in the loss_ptr output of avcodec_find_best_pix_fmt(). */
public static final int
        FF_LOSS_RESOLUTION = 0x0001, /**< loss due to resolution change */
        FF_LOSS_DEPTH      = 0x0002, /**< loss due to color depth change */
        FF_LOSS_COLORSPACE = 0x0004, /**< loss due to color space conversion */
        FF_LOSS_ALPHA      = 0x0008, /**< loss of alpha bits */
        FF_LOSS_COLORQUANT = 0x0010, /**< loss due to color quantization */
        FF_LOSS_CHROMA     = 0x0020; /**< loss of chroma (e.g. RGB to gray conversion) */
/**
 * Compute what kind of losses will occur when converting from one specific
 * pixel format to another.
 * When converting from one pixel format to another, information loss may occur.
 * For example, when converting from RGB24 to GRAY, the color information will
 * be lost. Similarly, other losses occur when converting from some formats to
 * other formats. These losses can involve loss of chroma, but also loss of
 * resolution, loss of color depth, loss due to the color space conversion, loss
 * of the alpha bits or loss due to color quantization.
 * avcodec_get_pix_fmt_loss() informs you about the various types of losses
 * which will occur when converting from one pixel format to another.
 *
 * @param[in] dst_pix_fmt destination pixel format
 * @param[in] src_pix_fmt source pixel format
 * @param[in] has_alpha Whether the source pixel format alpha channel is used.
 * @return Combination of FF_LOSS_* flags informing you what kind of losses will occur
 *         (maximum loss for an invalid dst_pix_fmt).
 */
public static native int avcodec_get_pix_fmt_loss(@Cast("PixelFormat") int dst_pix_fmt,
        @Cast("PixelFormat") int src_pix_fmt, int has_alpha);
/**
* Find the best pixel format to convert to given a certain source pixel
* format. When converting from one pixel format to another, information loss
* may occur. For example, when converting from RGB24 to GRAY, the color
* information will be lost. Similarly, other losses occur when converting from
* some formats to other formats. avcodec_find_best_pix_fmt() searches which of
* the given pixel formats should be used to suffer the least amount of loss.
* The pixel formats from which it chooses one, are determined by the
* pix_fmt_mask parameter.
*
* Note, only the first 64 pixel formats will fit in pix_fmt_mask.
*
* @code
* src_pix_fmt = PIX_FMT_YUV420P;
* pix_fmt_mask = (1 << PIX_FMT_YUV422P) | (1 << PIX_FMT_RGB24);
* dst_pix_fmt = avcodec_find_best_pix_fmt(pix_fmt_mask, src_pix_fmt, alpha, &loss);
* @endcode
*
* @param[in] pix_fmt_mask bitmask determining which pixel formats to choose from
* (bit n selects pixel format n)
* @param[in] src_pix_fmt source pixel format
* @param[in] has_alpha Whether the source pixel format alpha channel is used.
* @param[out] loss_ptr Combination of FF_LOSS_* flags informing you what kind of losses will occur.
* @return The best pixel format to convert to or -1 if none was found.
*/
public static native @Cast("PixelFormat") int avcodec_find_best_pix_fmt(long pix_fmt_mask,
@Cast("PixelFormat") int src_pix_fmt, int has_alpha, int[] loss_ptr);
/**
* Find the best pixel format to convert to given a certain source pixel
* format and a selection of two destination pixel formats. When converting from
* one pixel format to another, information loss may occur. For example, when converting
* from RGB24 to GRAY, the color information will be lost. Similarly, other losses occur when
* converting from some formats to other formats. avcodec_find_best_pix_fmt2() selects which of
* the given pixel formats should be used to suffer the least amount of loss.
*
* If one of the destination formats is PIX_FMT_NONE the other pixel format (if valid) will be
* returned.
*
* @code
* // Chain calls to choose among more than two destination formats:
* src_pix_fmt = PIX_FMT_YUV420P;
* dst_pix_fmt1= PIX_FMT_RGB24;
* dst_pix_fmt2= PIX_FMT_GRAY8;
* dst_pix_fmt3= PIX_FMT_RGB8;
* loss= FF_LOSS_CHROMA; // don't care about chroma loss, so chroma loss will be ignored.
* dst_pix_fmt = avcodec_find_best_pix_fmt2(dst_pix_fmt1, dst_pix_fmt2, src_pix_fmt, alpha, &loss);
* dst_pix_fmt = avcodec_find_best_pix_fmt2(dst_pix_fmt, dst_pix_fmt3, src_pix_fmt, alpha, &loss);
* @endcode
*
* @param[in] dst_pix_fmt1 One of the two destination pixel formats to choose from
* @param[in] dst_pix_fmt2 The other of the two destination pixel formats to choose from
* @param[in] src_pix_fmt Source pixel format
* @param[in] has_alpha Whether the source pixel format alpha channel is used.
* @param[in, out] loss_ptr Combination of loss flags. In: selects which of the losses to ignore, i.e.
* NULL or value of zero means we care about all losses. Out: the loss
* that occurs when converting from src to selected dst pixel format.
* @return The best pixel format to convert to or -1 if none was found.
*/
public static native @Cast("PixelFormat") int avcodec_find_best_pix_fmt2(@Cast("PixelFormat") int dst_pix_fmt1,
@Cast("PixelFormat") int dst_pix_fmt2, @Cast("PixelFormat") int src_pix_fmt, int has_alpha, int[] loss_ptr);
/**
* Default implementation of the AVCodecContext.get_format() callback;
* fmt is the codec's list of supported pixel formats.
* NOTE(review): selection policy not visible here — see FFmpeg avcodec.h.
*/
public static native @Cast("PixelFormat") int avcodec_default_get_format(AVCodecContext s, @Cast("PixelFormat*") int[] fmt);
/**
* @}
*/
/**
* Set the frame dimensions (width/height) of the given codec context.
*/
public static native void avcodec_set_dimensions(AVCodecContext s, int width, int height);
/**
* Put a string representing the codec tag codec_tag in buf.
*
* @param buf_size size in bytes of buf
* @return the length of the string that would have been generated if
* enough space had been available, excluding the trailing null
*/
public static native @Cast("size_t") long av_get_codec_tag_string(@Cast("char*") BytePointer buf,
@Cast("size_t") long buf_size, @Cast("unsigned") int codec_tag);
/**
* Write into buf a human-readable description of the codec context enc.
* @param buf_size size in bytes of buf
* @param encode nonzero for an encoding context, zero for decoding
* (presumably selects the wording — confirm against avcodec.h)
*/
public static native void avcodec_string(@Cast("char*") BytePointer buf, int buf_size, AVCodecContext enc, int encode);
/**
* Return a name for the specified profile, if available.
*
* @param codec the codec that is searched for the given profile
* @param profile the profile value for which a name is requested
* @return A name for the profile if found, NULL otherwise.
*/
public static native String av_get_profile_name(AVCodec codec, int profile);
/**
* Default implementation of the AVCodecContext.execute() threading callback:
* invokes func for each of the count jobs in arg.
*/
public static native int avcodec_default_execute(AVCodecContext c, AVCodecContext.Execute.Func func, Pointer arg, IntPointer ret, int count, int size);
/**
* Default implementation of the AVCodecContext.execute2() threading callback.
*/
public static native int avcodec_default_execute2(AVCodecContext c, AVCodecContext.Execute2.Func2 func2, Pointer arg, IntPointer ret, int count);
//FIXME func typedef
/**
* Fill audio frame data and linesize.
* AVFrame extended_data channel pointers are allocated if necessary for
* planar audio.
*
* @param frame the AVFrame
* frame->nb_samples must be set prior to calling the
* function. This function fills in frame->data,
* frame->extended_data, frame->linesize[0].
* @param nb_channels channel count
* @param sample_fmt sample format
* @param buf buffer to use for frame data
* @param buf_size size of buffer
* @param align plane size sample alignment (0 = default)
* @return 0 on success, negative error code on failure
*/
public static native int avcodec_fill_audio_frame(AVFrame frame, int nb_channels,
@Cast("AVSampleFormat") int sample_fmt, @Cast("uint8_t*") BytePointer buf, int buf_size, int align);
/**
* Flush buffers, should be called when seeking or when switching to a different stream.
*/
public static native void avcodec_flush_buffers(AVCodecContext avctx);
/**
* Free the internal buffers of the given codec context
* (default implementation used by libavcodec itself).
*/
public static native void avcodec_default_free_buffers(AVCodecContext s);
/**
* Return codec bits per sample.
*
* @param[in] codec_id the codec
* @return Number of bits per sample or zero if unknown for the given codec.
*/
public static native int av_get_bits_per_sample(@Cast("CodecID") int codec_id);
/**
* Return the PCM codec associated with a sample format.
* @param be endianness, 0 for little, 1 for big,
* -1 (or anything else) for native
* @return CODEC_ID_PCM_* or CODEC_ID_NONE
*/
public static native @Cast("CodecID") int av_get_pcm_codec(@Cast("AVSampleFormat") int fmt, int be);
/**
* Return codec bits per sample.
* Only return non-zero if the bits per sample is exactly correct, not an
* approximation.
*
* @param[in] codec_id the codec
* @return Number of bits per sample or zero if unknown for the given codec.
*/
public static native int av_get_exact_bits_per_sample(@Cast("CodecID") int codec_id);
/**
* Return audio frame duration.
*
* @param avctx codec context
* @param frame_bytes size of the frame, or 0 if unknown
* @return frame duration, in samples, if known. 0 if not able to
* determine.
*/
public static native int av_get_audio_frame_duration(AVCodecContext avctx, int frame_bytes);
/**
* State of one active bitstream filter.
* Obtained from av_bitstream_filter_init() and released with
* av_bitstream_filter_close().
*/
public static class AVBitStreamFilterContext extends Pointer {
static { load(); }
public AVBitStreamFilterContext() { allocate(); }
public AVBitStreamFilterContext(int size) { allocateArray(size); }
public AVBitStreamFilterContext(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(int size);
@Override public AVBitStreamFilterContext position(int position) {
return (AVBitStreamFilterContext)super.position(position);
}
/** Opaque filter-private state. */
public native Pointer priv_data(); public native AVBitStreamFilterContext priv_data(Pointer priv_data);
/** The filter definition this context instantiates. */
public native AVBitStreamFilter filter(); public native AVBitStreamFilterContext filter(AVBitStreamFilter filter);
public native AVCodecParserContext parser(); public native AVBitStreamFilterContext parser(AVCodecParserContext parser);
/** Next context in the chain. */
public native AVBitStreamFilterContext next(); public native AVBitStreamFilterContext next(AVBitStreamFilterContext next);
}
/**
* Definition of a bitstream filter, as registered with
* av_register_bitstream_filter() and enumerated by av_bitstream_filter_next().
*/
public static class AVBitStreamFilter extends Pointer {
static { load(); }
public AVBitStreamFilter() { allocate(); }
public AVBitStreamFilter(int size) { allocateArray(size); }
public AVBitStreamFilter(Pointer p) { super(p); }
private native void allocate();
private native void allocateArray(int size);
@Override public AVBitStreamFilter position(int position) {
return (AVBitStreamFilter)super.position(position);
}
/** Name by which av_bitstream_filter_init() finds this filter. */
@Cast("const char *")
public native BytePointer name(); public native AVBitStreamFilter name(BytePointer name);
/** Size in bytes of the filter-private state allocated per context. */
public native int priv_data_size(); public native AVBitStreamFilter priv_data_size(int priv_data_size);
/** Callback performing the actual filtering; see av_bitstream_filter_filter(). */
public static class Filter extends FunctionPointer {
static { load(); }
public Filter(Pointer p) { super(p); }
protected Filter() { allocate(); }
protected final native void allocate();
public native int call(AVBitStreamFilterContext bsfc, AVCodecContext avctx,
String args, @Cast("uint8_t**") PointerPointer poutbuf, IntPointer poutbuf_size,
@Cast("const uint8_t*") BytePointer buf, int buf_size, int keyframe);
}
public native Filter filter(); public native AVBitStreamFilter filter(Filter filter);
/** Callback releasing filter-private resources; see av_bitstream_filter_close(). */
public static class Close extends FunctionPointer {
static { load(); }
public Close(Pointer p) { super(p); }
protected Close() { allocate(); }
protected final native void allocate();
public native void call(AVBitStreamFilterContext bsfc);
}
public native Close close(); public native AVBitStreamFilter close(Close close);
/** Next filter in the list of registered bitstream filters. */
public native AVBitStreamFilter next(); public native AVBitStreamFilter next(AVBitStreamFilter next);
}
/** Register the bitstream filter bsf so it can be found by name. */
public static native void av_register_bitstream_filter(AVBitStreamFilter bsf);
/**
* Create a context for the registered bitstream filter with the given name,
* or NULL if no such filter is registered.
* NOTE(review): NULL-on-failure presumed from the FFmpeg API — confirm.
*/
public static native AVBitStreamFilterContext av_bitstream_filter_init(String name);
/**
* Filter the buffer buf of size buf_size through bsfc, storing the
* (possibly newly allocated) output buffer in *poutbuf and its size in
* *poutbuf_size.
*/
public static native int av_bitstream_filter_filter(AVBitStreamFilterContext bsfc,
AVCodecContext avctx, String args, @Cast("uint8_t**") PointerPointer poutbuf, int[] poutbuf_size,
@Cast("uint8_t*") BytePointer buf, int buf_size, int keyframe);
/** Release a context obtained from av_bitstream_filter_init(). */
public static native void av_bitstream_filter_close(AVBitStreamFilterContext bsf);
/**
* If f is NULL, return the first registered bitstream filter;
* otherwise return the one after f, or NULL if f is the last one.
* NOTE(review): iteration contract presumed to match av_hwaccel_next() — confirm.
*/
public static native AVBitStreamFilter av_bitstream_filter_next(AVBitStreamFilter f);
/* memory */
/**
* Reallocate the given block if it is not large enough, otherwise do nothing.
*
* @see av_realloc
*/
public static native Pointer av_fast_realloc(Pointer ptr, @Cast("unsigned*") int[] size, @Cast("size_t") long min_size);
/**
* Allocate a buffer, reusing the given one if large enough.
*
* Contrary to av_fast_realloc the current buffer contents might not be
* preserved and on error the old buffer is freed, thus no special
* handling to avoid memleaks is necessary.
*
* @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer
* @param size size of the buffer *ptr points to
* @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and
* *size 0 if an error occurred.
*/
public static native void av_fast_malloc(Pointer ptr, @Cast("unsigned*") int[] size, @Cast("size_t") long min_size);
/**
* Same behaviour av_fast_malloc but the buffer has additional
* FF_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0.
*
* In addition the whole buffer will initially and after resizes
* be 0-initialized so that no uninitialized data will ever appear.
*/
public static native void av_fast_padded_malloc(Pointer ptr, @Cast("unsigned*") int[] size, @Cast("size_t") long min_size);
/**
* Encode extradata length to a buffer. Used by xiph codecs.
*
* @param s buffer to write to; must be at least (v/255+1) bytes long
* @param v size of extradata in bytes
* @return number of bytes written to the buffer.
*/
public static native @Cast("unsigned") int av_xiphlacing(@Cast("unsigned char*") BytePointer s, @Cast("unsigned") int v);
/**
* Log a generic warning message about a missing feature. This function is
* intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)
* only, and would normally not be used by applications.
* @param[in] avc a pointer to an arbitrary struct of which the first field is
* a pointer to an AVClass struct
* @param[in] feature string containing the name of the missing feature
* @param[in] want_sample indicates if samples are wanted which exhibit this feature.
* If want_sample is non-zero, additional verbiage will be added to the log
* message which tells the user how to report samples to the development
* mailing list.
*/
public static native void av_log_missing_feature(Pointer avc, String feature, int want_sample);
/**
* Log a generic warning message asking for a sample. This function is
* intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)
* only, and would normally not be used by applications.
* @param[in] avc a pointer to an arbitrary struct of which the first field is
* a pointer to an AVClass struct
* @param[in] msg string containing an optional message, or NULL if no message
*/
public static native void av_log_ask_for_sample(Pointer avc, String msg); // , ...) av_printf_format(2, 3);
/**
* Register the hardware accelerator hwaccel.
*/
public static native void av_register_hwaccel(AVHWAccel hwaccel);
/**
* If hwaccel is NULL, returns the first registered hardware accelerator,
* if hwaccel is non-NULL, returns the next registered hardware accelerator
* after hwaccel, or NULL if hwaccel is the last one.
*/
public static native AVHWAccel av_hwaccel_next(AVHWAccel hwaccel);
/**
* Lock operation used by lockmgr
* (op codes passed to the callback registered with av_lockmgr_register()).
*/
public static final int // enum AVLockOp {
AV_LOCK_CREATE = 0, ///< Create a mutex
AV_LOCK_OBTAIN = 1, ///< Lock the mutex
AV_LOCK_RELEASE = 2, ///< Unlock the mutex
AV_LOCK_DESTROY = 3; ///< Free mutex resources
/**
* Register a user provided lock manager supporting the operations
* specified by AVLockOp. mutex points to a (void *) where the
* lockmgr should store/get a pointer to a user allocated mutex. It's
* NULL upon AV_LOCK_CREATE and != NULL for all other ops.
*
* @param cb User defined callback. Note: FFmpeg may invoke calls to this
* callback during the call to av_lockmgr_register().
* Thus, the application must be prepared to handle that.
* If cb is set to NULL the lockmgr will be unregistered.
* Also note that during unregistration the previously registered
* lockmgr callback may also be invoked.
*/
/**
* Lock manager callback: receives the (void **) mutex slot described above
* and one of the AV_LOCK_* operation codes.
*/
public static class Cb extends FunctionPointer {
static { load(); }
public Cb(Pointer p) { super(p); }
protected Cb() { allocate(); }
protected final native void allocate();
public native int call(PointerPointer mutex, @Cast("AVLockOp") int op);
}
public static native int av_lockmgr_register(Cb cb);
/**
* Get the type of the given codec.
*/
public static native @Cast("AVMediaType") int avcodec_get_type(@Cast("CodecID") int codec_id);
/**
* Get the name of a codec.
* @return a static string identifying the codec; never NULL
*/
public static native String avcodec_get_name(@Cast("CodecID") int id);
/**
* @return a positive value if s is open (i.e. avcodec_open2() was called on it
* with no corresponding avcodec_close()), 0 otherwise.
*/
public static native int avcodec_is_open(AVCodecContext s);
/**
* @return a non-zero number if codec is an encoder, zero otherwise
*/
public static native int av_codec_is_encoder(AVCodec codec);
/**
* @return a non-zero number if codec is a decoder, zero otherwise
*/
public static native int av_codec_is_decoder(AVCodec codec);
/**
* @}
*/
}