Java Examples for org.bytedeco.javacv.OpenCVFrameGrabber
The following Java examples will help you understand the usage of org.bytedeco.javacv.OpenCVFrameGrabber. These source code samples are taken from different open source projects.
Example 1
| Project: PapAR-master File: CameraOpenCV.java View source code |
@Override
public void start() {
    // Configure an OpenCV-backed grabber for the camera identified by this.systemNumber,
    // using the resolution and color mode this component was set up with.
    OpenCVFrameGrabber grabberCV = new OpenCVFrameGrabber(this.systemNumber);
    grabberCV.setImageWidth(width());
    grabberCV.setImageHeight(height());
    grabberCV.setImageMode(FrameGrabber.ImageMode.COLOR);
    try {
        grabberCV.start();
        // Only publish the grabber and mark connected once start() succeeded.
        this.grabber = grabberCV;
        this.isConnected = true;
    } catch (Exception e) {
        // Fix: the original printed the same "could not start" message twice
        // (the second copy garbled as "Could not camera start frameGrabber");
        // report the failure once, with enough context to diagnose it.
        System.err.println("Could not start frameGrabber... " + e);
        System.err.println("Camera ID " + this.systemNumber + " could not start.");
        System.err.println("Check cable connection, ID and resolution asked.");
        this.grabber = null;
        // Keep the connection flag consistent with the null grabber.
        this.isConnected = false;
    }
}
Example 2
| Project: javac-master File: WebcamAndMicrophoneCapture.java View source code |
public static void main(String[] args) throws Exception, org.bytedeco.javacv.FrameGrabber.Exception {
    final int captureWidth = 1280;
    final int captureHeight = 720;
    // The available FrameGrabber classes include OpenCVFrameGrabber (opencv_videoio),
    // DC1394FrameGrabber, FlyCaptureFrameGrabber, OpenKinectFrameGrabber,
    // PS3EyeFrameGrabber, VideoInputFrameGrabber, and FFmpegFrameGrabber.
    OpenCVFrameGrabber grabber = new OpenCVFrameGrabber(WEBCAM_DEVICE_INDEX);
    grabber.setImageWidth(captureWidth);
    grabber.setImageHeight(captureHeight);
    grabber.start();
    // FFmpegFrameRecorder(String filename, int imageWidth, int imageHeight, int audioChannels):
    // filename = a path to a local file we wish to create, or an RTMP url to an FMS / Wowza server;
    // imageWidth / imageHeight = the size we specified for the grabber;
    // audioChannels = 2, because we like stereo.
    FFmpegFrameRecorder recorder = new FFmpegFrameRecorder("rtmp://my-streaming-server/app_name_here/instance_name/stream_name", captureWidth, captureHeight, 2);
    recorder.setInterleaved(true);
    // decrease "startup" latency in FFMPEG (see: https://trac.ffmpeg.org/wiki/StreamingGuide)
    recorder.setVideoOption("tune", "zerolatency");
    // Tradeoff between quality and encode speed. Possible values are ultrafast,
    // superfast, veryfast, faster, fast, medium, slow, slower, veryslow.
    // ultrafast offers the least compression (lower encoder CPU) at the cost of a
    // larger stream size; veryslow is the opposite end.
    // (see: https://trac.ffmpeg.org/wiki/Encode/H.264)
    recorder.setVideoOption("preset", "ultrafast");
    // Constant Rate Factor (see: https://trac.ffmpeg.org/wiki/Encode/H.264)
    recorder.setVideoOption("crf", "28");
    // 2000 kb/s, reasonable "sane" area for 720p
    recorder.setVideoBitrate(2000000);
    recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);
    recorder.setFormat("flv");
    // FPS (frames per second)
    recorder.setFrameRate(FRAME_RATE);
    // Key frame interval; e.g. every 2 seconds at 30 fps -> gop length 60
    recorder.setGopSize(GOP_LENGTH_IN_FRAMES);
    // We don't want variable bitrate audio
    recorder.setAudioOption("crf", "0");
    // Highest quality
    recorder.setAudioQuality(0);
    // 192 Kbps
    recorder.setAudioBitrate(192000);
    recorder.setSampleRate(44100);
    recorder.setAudioChannels(2);
    recorder.setAudioCodec(avcodec.AV_CODEC_ID_AAC);
    recorder.start();
    // Thread for audio capture; this could be a nested private class if you prefer.
    new Thread(new Runnable() {
        @Override
        public void run() {
            // Pick a format...
            // NOTE: It is better to enumerate the formats that the system supports,
            // because getLine() can error out with any particular format.
            // Here: 44.1 kHz sample rate, 16 bits, stereo, signed, little endian.
            AudioFormat audioFormat = new AudioFormat(44100.0F, 16, 2, true, false);
            // Get a TargetDataLine with that format
            Mixer.Info[] minfoSet = AudioSystem.getMixerInfo();
            Mixer mixer = AudioSystem.getMixer(minfoSet[AUDIO_DEVICE_INDEX]);
            DataLine.Info dataLineInfo = new DataLine.Info(TargetDataLine.class, audioFormat);
            try {
                // Open and start capturing audio. It's possible to have more control
                // over the chosen audio device with:
                // TargetDataLine line = (TargetDataLine) mixer.getLine(dataLineInfo);
                TargetDataLine line = (TargetDataLine) AudioSystem.getLine(dataLineInfo);
                line.open(audioFormat);
                line.start();
                int sampleRate = (int) audioFormat.getSampleRate();
                int numChannels = audioFormat.getChannels();
                // Our audio buffer: sampleRate * numChannels bytes.
                int audioBufferSize = sampleRate * numChannels;
                byte[] audioBytes = new byte[audioBufferSize];
                // Largest frame-aligned read that fits the buffer:
                // TargetDataLine.read() requires the requested length to be a
                // whole multiple of the frame size.
                int frameSize = audioFormat.getFrameSize();
                int maxReadBytes = audioBytes.length - (audioBytes.length % frameSize);
                // Using a ScheduledThreadPoolExecutor vs a while loop with Thread.sleep
                // gets around some OS specific timing issues and keeps a more precise
                // clock, as the fixed rate accounts for garbage collection time etc.
                // A similar approach could be used for the webcam capture as well.
                ScheduledThreadPoolExecutor exec = new ScheduledThreadPoolExecutor(1);
                exec.scheduleAtFixedRate(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            // Read from the line... non-blocking.
                            // Fix: clamp the request to the buffer capacity —
                            // line.available() is not bounded by audioBytes.length,
                            // and an oversized request would throw from read().
                            int nBytesRead = line.read(audioBytes, 0, Math.min(line.available(), maxReadBytes));
                            // Since we specified 16 bits in the AudioFormat, convert the
                            // read byte[] to short[] (see FFmpegFrameRecorder.recordSamples
                            // for AV_SAMPLE_FMT_S16).
                            int nSamplesRead = nBytesRead / 2;
                            short[] samples = new short[nSamplesRead];
                            // Wrap the short[] into a ShortBuffer and pass it to recordSamples
                            ByteBuffer.wrap(audioBytes).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(samples);
                            ShortBuffer sBuff = ShortBuffer.wrap(samples, 0, nSamplesRead);
                            // recorder is an org.bytedeco.javacv.FFmpegFrameRecorder
                            recorder.recordSamples(sampleRate, numChannels, sBuff);
                        } catch (org.bytedeco.javacv.FrameRecorder.Exception e) {
                            e.printStackTrace();
                        }
                    }
                }, 0, (long) 1000 / FRAME_RATE, TimeUnit.MILLISECONDS);
                // NOTE(review): exec is never shut down, so its non-daemon worker keeps
                // the JVM alive after the capture loop ends — confirm whether an
                // exec.shutdown() should be wired into the teardown path.
            } catch (LineUnavailableException e1) {
                e1.printStackTrace();
            }
        }
    }).start();
    // A really nice hardware accelerated component for our preview...
    CanvasFrame cFrame = new CanvasFrame("Capture Preview", CanvasFrame.getDefaultGamma() / grabber.getGamma());
    Frame capturedFrame = null;
    // While we are capturing...
    while ((capturedFrame = grabber.grab()) != null) {
        if (cFrame.isVisible()) {
            // Show our frame in the preview
            cFrame.showImage(capturedFrame);
        }
        // Initialize startTime on the first frame, as the delta from assignment
        // to computed time could otherwise be too high.
        if (startTime == 0)
            startTime = System.currentTimeMillis();
        // Create the timestamp for this frame (microseconds)
        videoTS = 1000 * (System.currentTimeMillis() - startTime);
        // Check for AV drift
        if (videoTS > recorder.getTimestamp()) {
            System.out.println("Lip-flap correction: " + videoTS + " : " + recorder.getTimestamp() + " -> " + (videoTS - recorder.getTimestamp()));
            // We tell the recorder to write this frame at this timestamp
            recorder.setTimestamp(videoTS);
        }
        // Send the frame to the org.bytedeco.javacv.FFmpegFrameRecorder
        recorder.record(capturedFrame);
    }
    cFrame.dispose();
    recorder.stop();
    grabber.stop();
}
Example 3
| Project: AtomicRNG-master File: AtomicRNG.java View source code |
/**
 * The main function called by the JVM.<br>
 * Most of the action happens in here.
 * @param args commandline arguments; see the {@code -h} output below
 */
public static void main(String[] args) {
// Workaround for java.lang.NoClassDefFoundError: Could not initialize class org.bytedeco.javacpp.avcodec
org.bytedeco.javacpp.Loader.load(org.bytedeco.javacpp.avcodec.class);
/*
 * Automagically get the version from maven.
 */
BufferedReader reader = new BufferedReader(new InputStreamReader(AtomicRNG.class.getResourceAsStream("/META-INF/maven/" + AtomicRNG.class.getPackage().getName() + "/AtomicRNG/pom.properties")));
String line;
try {
// NOTE(review): substring(0, 8) throws StringIndexOutOfBoundsException for lines
// shorter than 8 characters — confirm pom.properties never contains such lines.
while ((line = reader.readLine()) != null) if (line.substring(0, 8).equals("version=")) {
version = line.substring(8);
break;
}
reader.close();
} catch (IOException e) {
e.printStackTrace();
System.exit(1);
}
/*
 * Startup: Print program name and copyright.
 */
System.out.println("AtomicRNG v" + version + System.lineSeparator() + "(c) 2015 by Thomas \"V10lator\" Rohloff." + System.lineSeparator());
/*
 * Parse commandline arguments.
 */
boolean quiet = false, doubleView = false;
for (String arg : args) {
switch(arg) {
case ("-q"):
quiet = true;
break;
case ("-f"):
if (// No multiple inits for multiple args.
EntropyQueue.f == null)
EntropyQueue.fileInit();
break;
case ("-d"):
doubleView = true;
break;
/* case("-ef"):
experimentalFilter = true;
System.out.println("WARNING: Experimental noise filter activated!"+System.lineSeparator());
break;*/
case "-h":
System.out.println("Arguments:" + System.lineSeparator() + " -q : Be quiet." + System.lineSeparator() + " -f : Enable file output." + System.lineSeparator() + " -d : Enable double view." + System.lineSeparator() + // " -ef : Enable experimental filter."+System.lineSeparator()+
" -v : Enable video recorder." + System.lineSeparator() + " -h : Show this help." + System.lineSeparator());
return;
default:
System.out.println("Unknown argument: " + arg + System.lineSeparator() + System.lineSeparator());
System.exit(1);
}
}
/*
 * Tell the user we're going to initialize the ARV device.
 */
System.out.print("Initializing Alpha Ray Visualizer... ");
/*
 * Extract native libraries for use with JNA
 *
try {
/*
 * Create tmp dir and register it as JNAs library path.
 *
FileAttribute<?>[] empty = {};
Path tmpDir = Files.createTempDirectory("AtomicRNG_", empty);
tmpDir.toFile().deleteOnExit();
System.setProperty("jna.library.path", tmpDir.toString());
tmpDir.toFile().deleteOnExit(); // Delete tmp dir on exit.
JarFile file = new JarFile(AtomicRNG.class.getProtectionDomain().getCodeSource().getLocation().getFile()); // Open our jar.
/*
 * Extract all files.
 *
String[] files = { "xxhash" };
String[] prefixes = { ".so", "__LICENSE.txt" };
String jarDir = "resources/";
Path jarFile;
String fileName;
InputStream inStream;
for(String suffix: files)
for(String prefix: prefixes) {
fileName = suffix+prefix;
inStream = file.getInputStream(file.getJarEntry(jarDir+fileName));
jarFile = tmpDir.resolve(fileName);
jarFile.toFile().deleteOnExit();
Files.copy(inStream, jarFile);
inStream.close();
}
file.close(); // close jar.
} catch (Exception e) {
System.err.println("error!");
e.printStackTrace();
System.exit(1);
}
/*
 * Trap Cleanup().run() to be called when the JVM exits.
 */
Runtime.getRuntime().addShutdownHook(new Cleanup());
/*
 * Open and start the webcam inside of the ARV device.
 */
atomicRNGDevice = new OpenCVFrameGrabber(0);
try {
atomicRNGDevice.start();
} catch (Exception e) {
restartAtomicRNGDevice(e);
}
/*
 * Throw away the first frame cause of hardware init.
 * The noise filters will handle the rest.
 */
try {
atomicRNGDevice.grab().release();
} catch (org.bytedeco.javacv.FrameGrabber.Exception e) {
restartAtomicRNGDevice(e);
}
/*
 * Open the Linux RNG and keep it open all the time.
 * We close it in Cleanup().run().
 */
EntropyQueue.resetOSrng();
new EntropyQueue().start();
/*
 * In case we should draw the window initialize it and set its title.
 */
String[] stat = null, statOut = null;
CanvasFrame canvasFrame = null;
if (!quiet) {
canvasFrame = new CanvasFrame("AtomicRNG v" + version);
// %1..%4 are placeholders replaced with live statistics every 4 seconds below.
stat = new String[3];
stat[0] = "FPS: %1";
stat[1] = "%2 kb/s (%3 hashes/sec)";
stat[2] = "Queue: %4";
statOut = new String[3];
statOut[0] = "FPS: N/A";
statOut[1] = "N/A kb/s (N/A hashes/sec)";
statOut[2] = "Queue: N/A";
canvasFrame.setDefaultCloseOperation(CanvasFrame.EXIT_ON_CLOSE);
canvasFrame.getCanvas().addMouseListener(new AtomicMouseListener());
}
/*
 * We initialized everything.
 * Tell the user we're ready!
 */
System.out.println("done!");
/*
 * A few Variables we'll need inside of the main loop.
 */
int fpsCount = 0, avgFPS = 0;
Color yellow = new Color(1.0f, 1.0f, 0.0f, 0.1f);
BufferedImage statImg = null;
Font font = new Font("Arial Black", Font.PLAIN, 18);
long lastFound = System.currentTimeMillis();
long lastSlice = lastFound;
/*
 * All right, let's enter the matrix, eh, the main loop I mean...
 */
while (true) {
/*
 * Grab a frame from the webcam.
 */
IplImage img = null;
try {
/*
 * Grab a frame from the webcam.
 */
img = atomicRNGDevice.grab();
} catch (Exception e) {
restartAtomicRNGDevice(e);
}
if (img != null && !img.isNull()) {
/*
 * First get the start time of that loop run.
 */
long start = System.currentTimeMillis();
if (!quiet)
fpsCount++;
/*
 * After each 4 seconds...
 */
if (start - lastSlice >= 4000L) {
/*
 * ...update the windows title with the newest statistics...
 */
if (!quiet) {
// Frames counted over 4 seconds, divided by 4 (>> 2) = frames per second.
avgFPS = fpsCount >> 2;
if (((float) fpsCount / 4.0f) % 2 != 0)
avgFPS++;
String es = EntropyQueue.getStats();
for (int i = 0; i < stat.length; i++) statOut[i] = stat[i].replaceAll("%1", String.valueOf(avgFPS)).replaceAll("%2", String.valueOf((float) (byteCount >> 7) / 4.0f)).replaceAll("%3", String.valueOf((float) hashCount / 4.0f)).replaceAll("%4", es);
byteCount = hashCount = fpsCount = 0;
}
/*
 * prepare to count the next 4 seconds.
 */
lastSlice = start;
}
if (randomImageNumber > 0 && start - lastRandomImageSlice >= 3600000L)
paintRandomImage();
/*
 * The width is static, so if it's zero we never asked for it and other infos.
 * Let's do that.
 */
if (firstRun) {
// One-time setup: slice the frame into a grid of 64x64-ish scanner cells.
width = img.width();
height = img.height();
int rows = height >> 6, columns = width >> 6;
int cw = width / columns, ch = height / rows, yi;
scanners = new ImageScanner[rows][columns];
for (int y = 0; y < rows; y++) {
yi = y * ch;
for (int x = 0; x < columns; x++) scanners[y][x] = new ImageScanner(x * cw, yi);
}
/*
 * Calculate the needed window size and paint the red line in the middle.
 */
if (!quiet) {
if (doubleView) {
statXoffset = width + 2;
statWidth = statXoffset + width;
} else
statWidth = width;
statImg = new BufferedImage(statWidth, height, BufferedImage.TYPE_3BYTE_BGR);
if (doubleView)
for (int x = width; x < statXoffset; x++) for (int y = 0; y < height; y++) statImg.setRGB(x, y, Color.RED.getRGB());
canvasFrame.setCanvasSize(statWidth, height);
}
}
Graphics graphics = null;
if (!quiet) {
graphics = statImg.getGraphics();
graphics.drawImage(img.getBufferedImage(), 0, 0, null);
if (doubleView) {
graphics.setColor(Color.BLACK);
graphics.fillRect(statXoffset, 0, statXoffset + width, height);
}
}
/*
 * Wrap the frame to a Java BufferedImage and parse it pixel by pixel.
 */
ByteBuffer buffer = img.getByteBuffer();
boolean[][] ignoredPixels = new boolean[width][height];
for (int x = 0; x < width; x++) for (int y = 0; y < height; y++) ignoredPixels[x][y] = false;
ArrayList<Pixel>[] impacts = new ArrayList[(height >> 6) * (width >> 6)];
int c = 0;
for (ImageScanner[] isa : scanners) for (ImageScanner is : isa) impacts[c++] = is.scan(buffer, img.widthStep(), img.nChannels(), start, ignoredPixels);
boolean impact = false;
// Feed every detected impact (x, power, y) into the OS RNG.
for (ArrayList<Pixel> list : impacts) if (!list.isEmpty()) {
for (Pixel pix : list) {
toOSrng(pix.x);
toOSrng(pix.power);
toOSrng(pix.y);
if (!quiet) {
crosses.add(pix);
if (randomImageNumber > 0)
randomImagePixels.add(pix);
}
}
toOSrng((int) (start - lastFound));
impact = true;
}
if (!quiet) {
// Draw a red cross for each impact seen within the last 2 seconds.
Iterator<Pixel> iter = crosses.iterator();
Pixel pix;
graphics.setColor(Color.RED);
//boolean imp = false;
while (iter.hasNext()) {
pix = iter.next();
if (start - pix.found > 2000L) {
iter.remove();
continue;
}
paintCross(graphics, pix);
//imp = true;
}
/* TODO: Debugging stuff
if(imp)
try {
String fn = String.valueOf(in++);
while(fn.length() < 5)
fn = "0"+fn;
File out = new File("debug/"+fn+".png");
ImageIO.write(statImg, "png", out);
} catch(IOException e) {
e.printStackTrace();
}*/
}
/*
 * Write the yellow, transparent text onto the window and update it.
 */
if (!quiet) {
graphics.setColor(yellow);
if (doubleView) {
graphics.setFont(font);
graphics.drawString("Raw", width / 2 - 25, 25);
graphics.drawString("Filtered", statXoffset + (width / 2 - 50), 25);
}
graphics.setFont(smallFont);
int ty = 1;
for (String st : statOut) graphics.drawString(st, 5, ty++ * 10);
graphics.setColor(Color.RED);
getLock(false);
// A filled red dot marks an active video recording, an outline an inactive one.
if (videoOut != null) {
try {
ts += avgFPS == 0.0f ? start - lastFound : avgFPS;
//TODO: DEBUG!
videoOut.setTimestamp((int) ts);
videoOut.record(IplImage.createFrom(statImg));
lock.set(false);
} catch (org.bytedeco.javacv.FrameRecorder.Exception e) {
lock.set(false);
e.printStackTrace();
toggleRecording();
}
graphics.fillOval(statXoffset + width - 25, height - 25, 20, 20);
} else {
lock.set(false);
graphics.drawOval(statXoffset + width - 25, height - 25, 20, 20);
}
graphics.setColor(Color.GREEN);
if (randomImageNumber > 0)
graphics.fillOval(statXoffset + width - 50, height - 25, 20, 20);
else
graphics.drawOval(statXoffset + width - 50, height - 25, 20, 20);
if (!quiet)
canvasFrame.showImage(statImg);
}
if (impact)
lastFound = start;
/*
 * Release the resources of the frame.
 */
img.release();
firstRun = false;
}
try {
/*
 * Don't let us burn all CPU in case we're under heavy load.
 */
Thread.sleep(2L);
} catch (InterruptedException e) {
// Ignored: an interrupt merely shortens the throttle sleep.
}
}
}
}
Example 4
| Project: procamtracker-master File: RealityAugmentor.java View source code |
/**
 * (Re)initializes the virtual content to be projected, based on virtualSettings:
 * a live desktop screen capture (x11grab), a video file, an image file, or — when
 * nothing is configured — a generated fractal test image. All projection state is
 * reset first.
 *
 * @throws Exception if a configured projectorImageFile cannot be loaded
 */
public void initVirtualSettings() throws Exception {
// Reset all projection state before picking a new source.
desktopScreen = null;
robot = null;
handMouseCursor = null;
videoToProject = null;
imageToProject = null;
chronometer = null;
virtualBall = null;
if (virtualSettings.desktopScreenNumber < 0 && virtualSettings.projectorImageFile == null && virtualSettings.projectorVideoFile == null) {
// if no image file is given, use our own special fractal as image :)
imageToProject = IplImage.create(projector.imageWidth, projector.imageHeight, IPL_DEPTH_8U, channels);
IplImage tempFloat = IplImage.create(projector.imageWidth, projector.imageHeight, IPL_DEPTH_32F, channels);
projector.getRectifyingHomography(camera, tempH);
JavaCV.fractalTriangleWave(tempFloat, tempH);
// Scale the float fractal into the 8-bit projection image.
cvConvertScale(tempFloat, imageToProject, 255, 0);
} else if (virtualSettings.desktopScreenNumber >= 0) {
desktopScreen = CanvasFrame.getScreenDevice(virtualSettings.desktopScreenNumber);
robot = new Robot(desktopScreen);
int w = virtualSettings.desktopScreenWidth, h = virtualSettings.desktopScreenHeight;
if (w <= 0 || h <= 0) {
// Fall back to the screen's current display mode for the capture size.
DisplayMode dm = desktopScreen.getDisplayMode();
w = dm.getWidth();
h = dm.getHeight();
}
try {
// Grab the X11 desktop of the configured screen (":0.<n>") via ffmpeg's x11grab.
videoToProject = new FFmpegFrameGrabber(":0." + virtualSettings.desktopScreenNumber);
videoToProject.setFormat("x11grab");
videoToProject.setImageWidth(w);
videoToProject.setImageHeight(h);
videoToProject.setFrameRate(30);
// Pick the pixel format matching our channel count.
switch(channels) {
case 1:
videoToProject.setPixelFormat(AV_PIX_FMT_GRAY8);
break;
case 3:
videoToProject.setPixelFormat(AV_PIX_FMT_BGR24);
break;
case 4:
videoToProject.setPixelFormat(AV_PIX_FMT_RGBA);
break;
default:
assert false;
}
videoToProject.start();
imageToProject = null;
} catch (FrameGrabber.Exception e) {
// Screen grabbing unavailable: fall back to a blank image of the same size.
videoToProject = null;
imageToProject = IplImage.create(w, h, IPL_DEPTH_8U, channels);
}
handMouseCursor = ImageIO.read(getClass().getResource("icons/Choose.png"));
} else if (virtualSettings.projectorVideoFile != null) {
if (virtualSettings.projectorImageFile != null) {
OpenCVFrameConverter.ToIplImage converter1 = new OpenCVFrameConverter.ToIplImage();
Java2DFrameConverter converter2 = new Java2DFrameConverter();
// loads alpha channel
imageToProject = converter1.convert(converter2.getFrame(ImageIO.read(virtualSettings.projectorImageFile), 1.0, true));
if (imageToProject == null) {
throw new Exception("Error: Could not load projectorImageFile named \"" + virtualSettings.projectorImageFile + "\".");
}
// imageToProject.applyGamma(2.2);
final ByteBuffer buf = imageToProject.getByteBuffer();
final int width = imageToProject.width();
final int height = imageToProject.height();
final int step = imageToProject.widthStep();
final int channels = imageToProject.nChannels();
// minY = Integer.MAX_VALUE, maxY = Integer.MIN_VALUE;
// Undo gamma 2.2 in place on every color channel of every pixel.
for (int y = 0; y < height; y++) {
int pixel = y * step;
for (int x = 0; x < width; x++, pixel += channels) {
// Intentional fall-through: each case decodes one more channel, so a
// 4-channel (RGBA) image decodes bytes +2, +1 and +0 — the alpha byte
// at +3 is deliberately left untouched.
switch(channels) {
default:
assert false;
// RGBA
case 4:
// }
case 3:
buf.put(pixel + 2, (byte) Java2DFrameConverter.decodeGamma22(buf.get(pixel + 2)));
case 2:
buf.put(pixel + 1, (byte) Java2DFrameConverter.decodeGamma22(buf.get(pixel + 1)));
case 1:
buf.put(pixel + 0, (byte) Java2DFrameConverter.decodeGamma22(buf.get(pixel + 0)));
}
}
}
// if (maxX > minX && maxY > minY) {
// cvSetImageROI(imageToProject, cvRect(minX, minY, maxX-minX, maxY-minY));
// }
}
try {
videoToProject = new FFmpegFrameGrabber(virtualSettings.projectorVideoFile);
} catch (Throwable t) {
// FFmpeg unavailable/failed: fall back to OpenCV's own video reader.
videoToProject = new OpenCVFrameGrabber(virtualSettings.projectorVideoFile);
}
// NOTE(review): videoToProject is always non-null here (both branches above
// assign it), so this null check is effectively dead — confirm.
if (videoToProject != null) {
videoToProject.setImageMode(ImageMode.COLOR);
switch(channels) {
case 1:
videoToProject.setPixelFormat(AV_PIX_FMT_GRAY8);
break;
case 3:
videoToProject.setPixelFormat(AV_PIX_FMT_BGR24);
break;
case 4:
videoToProject.setPixelFormat(AV_PIX_FMT_RGBA);
break;
default:
assert false;
}
if (imageToProject != null) {
// Match the video size to the overlay image so they can be composited.
videoToProject.setImageWidth(imageToProject.width());
videoToProject.setImageHeight(imageToProject.height());
}
videoToProject.start();
}
} else if (virtualSettings.projectorImageFile != null) {
// does not load alpha channel
imageToProject = channels == 4 ? cvLoadImageRGBA(virtualSettings.projectorImageFile.getAbsolutePath()) : cvLoadImage(virtualSettings.projectorImageFile.getAbsolutePath(), channels == 3 ? CV_LOAD_IMAGE_COLOR : CV_LOAD_IMAGE_GRAYSCALE);
if (imageToProject == null) {
throw new Exception("Error: Could not load projectorImageFile named \"" + virtualSettings.projectorImageFile + "\".");
}
// Undo gamma 2.2 on the loaded image buffer.
Buffer buffer = imageToProject.createBuffer();
int depth = OpenCVFrameConverter.getFrameDepth(imageToProject.depth());
int stride = imageToProject.widthStep() * 8 / Math.abs(depth);
Java2DFrameConverter.applyGamma(buffer, depth, stride, 2.2);
}
}
}
Example 5
| Project: javacv-master File: WebcamAndMicrophoneCapture.java View source code |
public static void main(String[] args) throws Exception, org.bytedeco.javacv.FrameGrabber.Exception {
    final int captureWidth = 1280;
    final int captureHeight = 720;
    // The available FrameGrabber classes include OpenCVFrameGrabber (opencv_videoio),
    // DC1394FrameGrabber, FlyCaptureFrameGrabber, OpenKinectFrameGrabber,
    // PS3EyeFrameGrabber, VideoInputFrameGrabber, and FFmpegFrameGrabber.
    OpenCVFrameGrabber grabber = new OpenCVFrameGrabber(WEBCAM_DEVICE_INDEX);
    grabber.setImageWidth(captureWidth);
    grabber.setImageHeight(captureHeight);
    grabber.start();
    // FFmpegFrameRecorder(String filename, int imageWidth, int imageHeight, int audioChannels):
    // filename = a path to a local file we wish to create, or an RTMP url to an FMS / Wowza server;
    // imageWidth / imageHeight = the size we specified for the grabber;
    // audioChannels = 2, because we like stereo.
    FFmpegFrameRecorder recorder = new FFmpegFrameRecorder("rtmp://my-streaming-server/app_name_here/instance_name/stream_name", captureWidth, captureHeight, 2);
    recorder.setInterleaved(true);
    // decrease "startup" latency in FFMPEG (see: https://trac.ffmpeg.org/wiki/StreamingGuide)
    recorder.setVideoOption("tune", "zerolatency");
    // Tradeoff between quality and encode speed. Possible values are ultrafast,
    // superfast, veryfast, faster, fast, medium, slow, slower, veryslow.
    // ultrafast offers the least compression (lower encoder CPU) at the cost of a
    // larger stream size; veryslow is the opposite end.
    // (see: https://trac.ffmpeg.org/wiki/Encode/H.264)
    recorder.setVideoOption("preset", "ultrafast");
    // Constant Rate Factor (see: https://trac.ffmpeg.org/wiki/Encode/H.264)
    recorder.setVideoOption("crf", "28");
    // 2000 kb/s, reasonable "sane" area for 720p
    recorder.setVideoBitrate(2000000);
    recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);
    recorder.setFormat("flv");
    // FPS (frames per second)
    recorder.setFrameRate(FRAME_RATE);
    // Key frame interval; e.g. every 2 seconds at 30 fps -> gop length 60
    recorder.setGopSize(GOP_LENGTH_IN_FRAMES);
    // We don't want variable bitrate audio
    recorder.setAudioOption("crf", "0");
    // Highest quality
    recorder.setAudioQuality(0);
    // 192 Kbps
    recorder.setAudioBitrate(192000);
    recorder.setSampleRate(44100);
    recorder.setAudioChannels(2);
    recorder.setAudioCodec(avcodec.AV_CODEC_ID_AAC);
    recorder.start();
    // Thread for audio capture; this could be a nested private class if you prefer.
    new Thread(new Runnable() {
        @Override
        public void run() {
            // Pick a format...
            // NOTE: It is better to enumerate the formats that the system supports,
            // because getLine() can error out with any particular format.
            // Here: 44.1 kHz sample rate, 16 bits, stereo, signed, little endian.
            AudioFormat audioFormat = new AudioFormat(44100.0F, 16, 2, true, false);
            // Get a TargetDataLine with that format
            Mixer.Info[] minfoSet = AudioSystem.getMixerInfo();
            Mixer mixer = AudioSystem.getMixer(minfoSet[AUDIO_DEVICE_INDEX]);
            DataLine.Info dataLineInfo = new DataLine.Info(TargetDataLine.class, audioFormat);
            try {
                // Open and start capturing audio. It's possible to have more control
                // over the chosen audio device with:
                // TargetDataLine line = (TargetDataLine) mixer.getLine(dataLineInfo);
                TargetDataLine line = (TargetDataLine) AudioSystem.getLine(dataLineInfo);
                line.open(audioFormat);
                line.start();
                int sampleRate = (int) audioFormat.getSampleRate();
                int numChannels = audioFormat.getChannels();
                // Our audio buffer: sampleRate * numChannels bytes.
                int audioBufferSize = sampleRate * numChannels;
                byte[] audioBytes = new byte[audioBufferSize];
                // Largest frame-aligned read that fits the buffer:
                // TargetDataLine.read() requires the requested length to be a
                // whole multiple of the frame size.
                int frameSize = audioFormat.getFrameSize();
                int maxReadBytes = audioBytes.length - (audioBytes.length % frameSize);
                // Using a ScheduledThreadPoolExecutor vs a while loop with Thread.sleep
                // gets around some OS specific timing issues and keeps a more precise
                // clock, as the fixed rate accounts for garbage collection time etc.
                // A similar approach could be used for the webcam capture as well.
                ScheduledThreadPoolExecutor exec = new ScheduledThreadPoolExecutor(1);
                exec.scheduleAtFixedRate(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            // Read from the line... non-blocking.
                            // Fix: clamp the request to the buffer capacity —
                            // line.available() is not bounded by audioBytes.length,
                            // and an oversized request would throw from read().
                            int nBytesRead = line.read(audioBytes, 0, Math.min(line.available(), maxReadBytes));
                            // Since we specified 16 bits in the AudioFormat, convert the
                            // read byte[] to short[] (see FFmpegFrameRecorder.recordSamples
                            // for AV_SAMPLE_FMT_S16).
                            int nSamplesRead = nBytesRead / 2;
                            short[] samples = new short[nSamplesRead];
                            // Wrap the short[] into a ShortBuffer and pass it to recordSamples
                            ByteBuffer.wrap(audioBytes).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(samples);
                            ShortBuffer sBuff = ShortBuffer.wrap(samples, 0, nSamplesRead);
                            // recorder is an org.bytedeco.javacv.FFmpegFrameRecorder
                            recorder.recordSamples(sampleRate, numChannels, sBuff);
                        } catch (org.bytedeco.javacv.FrameRecorder.Exception e) {
                            e.printStackTrace();
                        }
                    }
                }, 0, (long) 1000 / FRAME_RATE, TimeUnit.MILLISECONDS);
                // NOTE(review): exec is never shut down, so its non-daemon worker keeps
                // the JVM alive after the capture loop ends — confirm whether an
                // exec.shutdown() should be wired into the teardown path.
            } catch (LineUnavailableException e1) {
                e1.printStackTrace();
            }
        }
    }).start();
    // A really nice hardware accelerated component for our preview...
    CanvasFrame cFrame = new CanvasFrame("Capture Preview", CanvasFrame.getDefaultGamma() / grabber.getGamma());
    Frame capturedFrame = null;
    // While we are capturing...
    while ((capturedFrame = grabber.grab()) != null) {
        if (cFrame.isVisible()) {
            // Show our frame in the preview
            cFrame.showImage(capturedFrame);
        }
        // Initialize startTime on the first frame, as the delta from assignment
        // to computed time could otherwise be too high.
        if (startTime == 0)
            startTime = System.currentTimeMillis();
        // Create the timestamp for this frame (microseconds)
        videoTS = 1000 * (System.currentTimeMillis() - startTime);
        // Check for AV drift
        if (videoTS > recorder.getTimestamp()) {
            System.out.println("Lip-flap correction: " + videoTS + " : " + recorder.getTimestamp() + " -> " + (videoTS - recorder.getTimestamp()));
            // We tell the recorder to write this frame at this timestamp
            recorder.setTimestamp(videoTS);
        }
        // Send the frame to the org.bytedeco.javacv.FFmpegFrameRecorder
        recorder.record(capturedFrame);
    }
    cFrame.dispose();
    recorder.stop();
    grabber.stop();
}
Example 6
| Project: myrobotlab-master File: VideoProcessor.java View source code |
/**
 * Picks the frame-grabber implementation class name best suited to the current
 * platform: VideoInput on Windows, OpenCV everywhere else.
 *
 * @return fully qualified class name of the default JavaCV frame grabber
 */
public static String getDefaultFrameGrabberType() {
    Platform platform = Runtime.getInstance().getPlatform();
    return platform.isWindows()
            ? "org.bytedeco.javacv.VideoInputFrameGrabber"
            : "org.bytedeco.javacv.OpenCVFrameGrabber";
}