Java Examples for javax.sound.sampled.AudioFileFormat

The following Java examples illustrate how javax.sound.sampled.AudioFileFormat is used in practice. The source code samples are taken from different open source projects.
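Most of the samples share one pattern: obtain an AudioFileFormat from AudioSystem or from an AudioFileReader service provider, inspect its container Type and its AudioFormat, or pass an AudioFileFormat.Type to AudioSystem.write. A minimal self-contained sketch of that pattern (the file name is a placeholder):

import java.io.File;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;

public class InspectAudioFile {
    public static void main(String[] args) throws Exception {
        // "example.wav" is a placeholder; any file that a registered
        // provider can parse (WAV, AIFF, AU, ...) works here.
        File file = new File(args.length > 0 ? args[0] : "example.wav");
        AudioFileFormat fileFormat = AudioSystem.getAudioFileFormat(file);
        AudioFormat format = fileFormat.getFormat();
        System.out.println("Type:   " + fileFormat.getType());
        System.out.println("Frames: " + fileFormat.getFrameLength());
        System.out.println("Format: " + format);
    }
}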

Example 1
Project: openjdk-master  File: AiffFileWriter.java
// METHODS TO IMPLEMENT AudioFileWriter
@Override
public AudioFileFormat.Type[] getAudioFileTypes(AudioInputStream stream) {
    AudioFileFormat.Type[] filetypes = new AudioFileFormat.Type[types.length];
    System.arraycopy(types, 0, filetypes, 0, types.length);
    // make sure we can write this stream
    AudioFormat format = stream.getFormat();
    AudioFormat.Encoding encoding = format.getEncoding();
    if ((AudioFormat.Encoding.ALAW.equals(encoding)) || (AudioFormat.Encoding.ULAW.equals(encoding)) || (AudioFormat.Encoding.PCM_SIGNED.equals(encoding)) || (AudioFormat.Encoding.PCM_UNSIGNED.equals(encoding))) {
        return filetypes;
    }
    return new AudioFileFormat.Type[0];
}
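Example 1 is the provider side of the API; application code normally asks the same question through AudioSystem. A sketch of the calling side, with hypothetical input and output files:

import java.io.File;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

// List the container types writable for a stream, then write one of them.
static void writeAsAiff(File in, File out) throws Exception {
    AudioInputStream stream = AudioSystem.getAudioInputStream(in);
    for (AudioFileFormat.Type type : AudioSystem.getAudioFileTypes(stream)) {
        System.out.println("writable as: " + type);
    }
    if (AudioSystem.isFileTypeSupported(AudioFileFormat.Type.AIFF, stream)) {
        AudioSystem.write(stream, AudioFileFormat.Type.AIFF, out);
    }
}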
Example 2
Project: JSSRC-master  File: JSSRCResamplerTest.java
/**
     *
     * This simple test downsamples and upsamples two test files
     *
     */
@Test
public void testReadSamples() throws Exception {
    String[] fileNames = new String[] { "/mono_short_test.wav", "/stereo_long_test.wav" };
    float[] outSamplingRates = new float[] { 11025f, 96000f };
    for (String inFileName : fileNames) {
        for (float outSamplingRate : outSamplingRates) {
            String inFilePath = this.getClass().getResource(inFileName).getPath();
            AudioInputStream audioInputStream = AudioSystem.getAudioInputStream(new File(inFilePath));
            AudioFormat sourceFormat = audioInputStream.getFormat();
            AudioFormat targetFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, outSamplingRate, sourceFormat.getSampleSizeInBits(), sourceFormat.getChannels(), sourceFormat.getFrameSize(), sourceFormat.getFrameRate(), sourceFormat.isBigEndian());
            AudioInputStream inputStream = AudioSystem.getAudioInputStream(targetFormat, audioInputStream);
            AudioSystem.write(inputStream, AudioFileFormat.Type.WAVE, new File(String.format("%s_resampled_%d.wav", inFilePath, (int) outSamplingRate)));
        }
    }
}
Example 3
Project: Distributed-Speaker-Diarization-System-master  File: SplitterServiceImpl.java
@Override
public void run() {
    try {
        IndexIntervalsToTimelineMapper intervalsToTimelineMapper = new IndexIntervalsToTimelineMapper(splitterRequest.getSingleIntervalLength());
        List<Map<String, List<double[]>>> timelines = new ArrayList<Map<String, List<double[]>>>();
        Map<String, List<double[]>> wholeTimeline = new LinkedHashMap<String, List<double[]>>();
        for (Map<String, List<int[]>> labeledIntervals : splitterRequest.getLabeledIntervalsKey()) {
            Map<String, List<double[]>> timeline = intervalsToTimelineMapper.mapGroupByLabel(labeledIntervals);
            timelines.add(timeline);
            wholeTimeline.putAll(timeline);
        }
        AudioFileSplitter audioFileSplitter = new AudioFileSplitter();
        AudioInfo audioInfo = splitterRequest.getAudioInfo();
        AudioFormat audioFormat = AudioInfoUtil.convert(audioInfo);
        audioFileSplitter.split(audioFormat, audioInfo.getDuration(), audioInfo.getAudioFile(), wholeTimeline, splitterRequest.getOutputDirectory(), AudioFileFormat.Type.WAVE);
        SplitterResponse splitterResponse = new SplitterResponse(splitterRequest.getRequestIdentifier(), timelines);
        backendService.notify(splitterResponse);
    } catch (Exception e) {
        if (logger.isErrorEnabled()) {
            logger.error(ExceptionUtils.getStackTrace(e));
        }
    }
}
Example 4
Project: GpsPrune-master  File: AudioClip.java
/**
	 * @return length of this audio clip in seconds
	 */
public int getLengthInSeconds() {
    if (_lengthInSeconds == LENGTH_UNKNOWN) {
        try {
            AudioFileFormat format = null;
            if (getFile() != null)
                format = AudioSystem.getAudioFileFormat(getFile());
            else
                format = AudioSystem.getAudioFileFormat(new ByteArrayInputStream(_data));
            _lengthInSeconds = (int) (format.getFrameLength() / format.getFormat().getFrameRate());
        } catch (Exception e) {
            _lengthInSeconds = LENGTH_NOT_AVAILABLE;
        }
    }
    return _lengthInSeconds;
}
Example 5
Project: marytts-master  File: FDPSOLAProcessor.java
public void convertToWav(AudioFormat audioformat) throws IOException {
    // Read the temp binary file into a wav file and delete the temp binary file
    if (tempOutBinaryFile != null) {
        double[] yOut = null;
        din = new LEDataInputStream(tempOutBinaryFile);
        yOut = din.readDouble(totalWrittenToFile);
        din.close();
        double tmpMax = MathUtils.getAbsMax(yOut);
        if (tmpMax > 1.0) {
            for (int n = 0; n < yOut.length; n++) yOut[n] /= tmpMax;
        }
        outputAudio = new DDSAudioInputStream(new BufferedDoubleDataSource(yOut), audioformat);
        AudioSystem.write(outputAudio, AudioFileFormat.Type.WAVE, new File(outputFile));
        File tmpFile = new File(tempOutBinaryFile);
        tmpFile.delete();
    }
}
Example 6
Project: preservation-tools-master  File: AudioFilesConversion.java
@SuppressWarnings("static-access")
private static void convertWavFile(File file) {
    File outputfile = new File(filetools.executables.CdRom_IsoImageChecker.archivFolder + "\\" + filename + ".wav");
    int totalFramesRead = 0;
    File fileIn = new File(file.toString());
    try {
        AudioInputStream audioInputStream = AudioSystem.getAudioInputStream(fileIn);
        int bytesPerFrame = audioInputStream.getFormat().getFrameSize();
        System.out.println("Bytes per Frame: " + bytesPerFrame);
        if (bytesPerFrame == AudioSystem.NOT_SPECIFIED) {
            // some audio formats may have unspecified frame size
            // in that case we may read any amount of bytes
            bytesPerFrame = 1;
        }
        // Set an arbitrary buffer size of 1024 frames.
        int numBytes = 1024 * bytesPerFrame;
        byte[] audioBytes = new byte[numBytes];
        try {
            int numBytesRead = 0;
            int numFramesRead = 0;
            while ((numBytesRead = audioInputStream.read(audioBytes)) != -1) {
                // Calculate the number of frames actually read.
                numFramesRead = numBytesRead / bytesPerFrame;
                totalFramesRead += numFramesRead;
                // Here, do something useful with the audio data that's
                // now in the audioBytes array...
                AudioFileFormat.Type filetype = new AudioFileFormat.Type(file.toString(), audioFolder);
                AudioFileFormat format = new AudioFileFormat(filetype, audioInputStream.getFormat(), numFramesRead);
                AudioSystem.write(audioInputStream, format.getType(), outputfile);
            }
        } catch (Exception ex) {
            System.out.println(ex);
        }
    } catch (Exception e) {
        System.out.println(e);
    }
}
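Note that AudioSystem.write drains the stream by itself, so calling it inside the read loop above writes only the frames that have not already been pulled into audioBytes. When the goal is simply to rewrite an audio file as WAV, a single call is enough; a minimal sketch with hypothetical file arguments:

import java.io.File;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

// AudioSystem.write consumes the whole stream itself; no read loop needed.
static void convertToWav(File in, File out) throws Exception {
    AudioInputStream audioInputStream = AudioSystem.getAudioInputStream(in);
    AudioSystem.write(audioInputStream, AudioFileFormat.Type.WAVE, out);
}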
Example 7
Project: classlib6-master  File: AiffFileReader.java
// METHODS TO IMPLEMENT AudioFileReader
/**
     * Obtains the audio file format of the input stream provided.  The stream must
     * point to valid audio file data.  In general, audio file providers may
     * need to read some data from the stream before determining whether they
     * support it.  These parsers must
     * be able to mark the stream, read enough data to determine whether they
     * support the stream, and, if not, reset the stream's read pointer to its original
     * position.  If the input stream does not support this, this method may fail
     * with an IOException.
     * @param stream the input stream from which file format information should be
     * extracted
     * @return an <code>AudioFileFormat</code> object describing the audio file format
     * @throws UnsupportedAudioFileException if the stream does not point to valid audio
     * file data recognized by the system
     * @throws IOException if an I/O exception occurs
     * @see InputStream#markSupported
     * @see InputStream#mark
     */
public AudioFileFormat getAudioFileFormat(InputStream stream) throws UnsupportedAudioFileException, IOException {
    // fix for 4489272: AudioSystem.getAudioFileFormat() fails for InputStream, but works for URL
    AudioFileFormat aff = getCOMM(stream, true);
    // the following is not strictly necessary - but was implemented like that in 1.3.0 - 1.4.1
    // so I leave it as it was. May remove this for 1.5.0
    stream.reset();
    return aff;
}
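The mark/reset contract described in this Javadoc is the caller's responsibility; a raw stream without mark support has to be buffered before it is probed. A short sketch of the calling side:

import java.io.BufferedInputStream;
import java.io.InputStream;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioSystem;

// AudioSystem.getAudioFileFormat(InputStream) needs mark/reset, so wrap
// raw streams (for example, a socket stream) in a BufferedInputStream.
static AudioFileFormat probe(InputStream raw) throws Exception {
    InputStream stream = raw.markSupported() ? raw : new BufferedInputStream(raw);
    return AudioSystem.getAudioFileFormat(stream);
}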
Example 8
Project: emul-master  File: CassetteDeck.java
public void playCassette() {
    synchronized (CassetteDeck.this) {
        if (cassetteReader != null)
            cassetteReader.close();
    }
    if (audioFile != null && cassetteEnabled.getBoolean()) {
        try {
            AudioFileFormat format = CassetteFileUtils.scanAudioFile(audioFile);
            synchronized (CassetteDeck.this) {
                cassetteReader = new CassetteReader(audioFile, format, cassetteDebug, this);
                prevPos = 0;
                prevTicks = getTickCount();
            }
        } catch (IOException e) {
            machine.getEventNotifier().notifyEvent(this, Level.ERROR, "Failed to open audio file " + audioFile + " for cassette: " + e.getMessage());
        } catch (UnsupportedAudioFileException e) {
            machine.getEventNotifier().notifyEvent(this, Level.ERROR, "Could not recognize audio format in " + audioFile + " for cassette: " + e.getMessage());
        }
    }
}
Example 9
Project: tika-master  File: AudioParser.java
public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context) throws IOException, SAXException, TikaException {
    // AudioSystem expects the stream to support the mark feature
    if (!stream.markSupported()) {
        stream = new BufferedInputStream(stream);
    }
    try {
        AudioFileFormat fileFormat = AudioSystem.getAudioFileFormat(stream);
        Type type = fileFormat.getType();
        if (type == Type.AIFC || type == Type.AIFF) {
            metadata.set(Metadata.CONTENT_TYPE, "audio/x-aiff");
        } else if (type == Type.AU || type == Type.SND) {
            metadata.set(Metadata.CONTENT_TYPE, "audio/basic");
        } else if (type == Type.WAVE) {
            metadata.set(Metadata.CONTENT_TYPE, "audio/x-wav");
        }
        AudioFormat audioFormat = fileFormat.getFormat();
        int channels = audioFormat.getChannels();
        if (channels != AudioSystem.NOT_SPECIFIED) {
            metadata.set("channels", String.valueOf(channels));
        // TODO: Use XMPDM.TRACKS? (see also frame rate in AudioFormat)
        }
        float rate = audioFormat.getSampleRate();
        if (rate != AudioSystem.NOT_SPECIFIED) {
            metadata.set("samplerate", String.valueOf(rate));
            metadata.set(XMPDM.AUDIO_SAMPLE_RATE, Integer.toString((int) rate));
        }
        int bits = audioFormat.getSampleSizeInBits();
        if (bits != AudioSystem.NOT_SPECIFIED) {
            metadata.set("bits", String.valueOf(bits));
            if (bits == 8) {
                metadata.set(XMPDM.AUDIO_SAMPLE_TYPE, "8Int");
            } else if (bits == 16) {
                metadata.set(XMPDM.AUDIO_SAMPLE_TYPE, "16Int");
            } else if (bits == 32) {
                metadata.set(XMPDM.AUDIO_SAMPLE_TYPE, "32Int");
            }
        }
        metadata.set("encoding", audioFormat.getEncoding().toString());
        // Javadoc suggests that some of the following properties might
        // be available, but I had no success in finding any:
        // "duration" Long playback duration of the file in microseconds
        // "author" String name of the author of this file
        // "title" String title of this file
        // "copyright" String copyright message
        // "date" Date date of the recording or release
        // "comment" String an arbitrary text
        addMetadata(metadata, fileFormat.properties());
        addMetadata(metadata, audioFormat.properties());
    } catch (UnsupportedAudioFileException e) {
    }
    XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
    xhtml.startDocument();
    xhtml.endDocument();
}
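The comment block above lists optional properties such as "duration"; whether any of them appear depends entirely on the service provider that parsed the file. A sketch of reading one of them directly (the core WAV/AIFF/AU providers usually return an empty map, while MP3 and Ogg SPIs often populate it):

import java.io.File;
import java.util.Map;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioSystem;

// Returns the playback duration in microseconds, or null if the provider
// did not supply the optional "duration" property.
static Long durationMicros(File file) throws Exception {
    AudioFileFormat fileFormat = AudioSystem.getAudioFileFormat(file);
    Map<String, Object> properties = fileFormat.properties();
    Object duration = properties.get("duration");
    return (duration instanceof Long) ? (Long) duration : null;
}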
Example 10
Project: EDMHouse-master  File: MpegAudioFileReader.java
/**
     * Returns AudioFileFormat from URL.
     */
public AudioFileFormat getAudioFileFormat(URL url) throws UnsupportedAudioFileException, IOException {
    if (TDebug.TraceAudioFileReader) {
        TDebug.out("MpegAudioFileReader.getAudioFileFormat(URL): begin");
    }
    long lFileLengthInBytes = AudioSystem.NOT_SPECIFIED;
    URLConnection conn = url.openConnection();
    // Tell the Shoutcast server (if any) that this SPI supports Shoutcast streams.
    conn.setRequestProperty("Icy-Metadata", "1");
    InputStream inputStream = conn.getInputStream();
    AudioFileFormat audioFileFormat = null;
    try {
        audioFileFormat = getAudioFileFormat(inputStream, lFileLengthInBytes);
    } finally {
        inputStream.close();
    }
    if (TDebug.TraceAudioFileReader) {
        TDebug.out("MpegAudioFileReader.getAudioFileFormat(URL): end");
    }
    return audioFileFormat;
}
Example 11
Project: jcaptcha-trunk-master  File: SoundToWavHelper.java
/**
     * Retrieves a new SoundCaptcha using SoundCaptchaService and flushes it to the response. <br/> Captchas are localized
     * using the request locale. <br/>This method returns a 404 to the client instead of the sound if the request isn't
     * correct (missing parameters, etc.). <br/>The log may be null. <br/>
     *
     * @param theRequest  the request
     * @param theResponse the response
     * @param log         a commons logger
     * @param service     an SoundCaptchaService instance
     *
     * @throws java.io.IOException if a problem occurs during the sound generation process
     */
public static void flushNewCaptchaToResponse(HttpServletRequest theRequest, HttpServletResponse theResponse, Logger log, SoundCaptchaService service, String id, Locale locale) throws IOException {
    // call the SoundCaptchaService method to retrieve a captcha
    byte[] captchaChallengeAsWav = null;
    ByteArrayOutputStream wavOutputStream = new ByteArrayOutputStream();
    try {
        AudioInputStream stream = service.getSoundChallengeForID(id, locale);
        // call the SoundCaptchaService method to retrieve a captcha
        AudioSystem.write(stream, AudioFileFormat.Type.WAVE, wavOutputStream);
    } catch (IllegalArgumentException e) {
        if (log != null && log.isWarnEnabled()) {
            log.warn("There was a try from " + theRequest.getRemoteAddr() + " to render an captcha with invalid ID :'" + id + "' or with a too long one");
            theResponse.sendError(HttpServletResponse.SC_NOT_FOUND);
            return;
        }
    } catch (CaptchaServiceException e) {
        if (log != null && log.isWarnEnabled()) {
            log.warn("Error trying to generate a captcha and " + "render its challenge as JPEG", e);
        }
        theResponse.sendError(HttpServletResponse.SC_NOT_FOUND);
        return;
    }
    captchaChallengeAsWav = wavOutputStream.toByteArray();
    // render the captcha challenge as WAV audio in the response
    theResponse.setHeader("Cache-Control", "no-store");
    theResponse.setHeader("Pragma", "no-cache");
    theResponse.setDateHeader("Expires", 0);
    theResponse.setContentType("audio/x-wav");
    ServletOutputStream responseOutputStream = theResponse.getOutputStream();
    responseOutputStream.write(captchaChallengeAsWav);
    responseOutputStream.flush();
    responseOutputStream.close();
}
Example 12
Project: jflac-dev-master  File: FlacAudioFileReaderTest.java
/**
     * Open buffered (supporting mark()) inputstream and get format.
     *
     * @throws UnsupportedAudioFileException
     * @throws IOException
     */
public void testGetAudioFileFormatWithBufferedFlacStream() throws UnsupportedAudioFileException, IOException {
    final FlacAudioFileReader flacAudioFileReader = new FlacAudioFileReader();
    final File flacTestFile = getFlacTestFile("cymbals.flac");
    InputStream in = null;
    try {
        in = new BufferedInputStream(new FileInputStream(flacTestFile));
        assertTrue("For this test the stream MUST support mark()", in.markSupported());
        final AudioFileFormat fileFormat = flacAudioFileReader.getAudioFileFormat(in);
        assertNotNull(fileFormat);
        final AudioFormat format = fileFormat.getFormat();
        assertEquals(44100f, format.getSampleRate());
        assertEquals(16, format.getSampleSizeInBits());
        assertEquals(2, format.getChannels());
        assertEquals("FLAC", format.getEncoding().toString());
    } finally {
        if (in != null) {
            try {
                in.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}
Example 13
Project: jstk-master  File: SpeexPlayer.java
public static AudioInputStream getAudioInputStream(InputStream is) throws UnsupportedAudioFileException, IOException {
    // so markSupported is true
    BufferedInputStream bis = new BufferedInputStream(is);
    if (!bis.markSupported())
        throw new Error("BufferedInputStream: mark not supported !?!");
    SpeexAudioFileReader reader = new SpeexAudioFileReader();
    bis.mark(max_headersize);
    AudioFileFormat speexFormat = null;
    speexFormat = reader.getAudioFileFormat(bis);
    bis.reset();
    Speex2PcmAudioInputStream pcmStream = new Speex2PcmAudioInputStream(bis, speexFormat.getFormat(), 0);
    // The Speex header does not describe the decoded PCM data, so we invent our own AudioFormat;
    // the stream length is not known, and it seems OK to pass -1.
    AudioInputStream ais = new AudioInputStream(pcmStream, new AudioFormat(speexFormat.getFormat().getSampleRate(), 16, 1, true, false), -1);
    return ais;
}
Example 14
Project: musique-master  File: AACAudioFileReader.java
@Override
public AudioFileFormat getAudioFileFormat(File file) throws UnsupportedAudioFileException, IOException {
    InputStream in = null;
    try {
        in = new BufferedInputStream(new FileInputStream(file));
        in.mark(1000);
        final AudioFileFormat aff = getAudioFileFormat(in, (int) file.length());
        in.reset();
        return aff;
    } finally {
        if (in != null)
            in.close();
    }
}
Example 15
Project: zmpp-wandora-master  File: BlorbSounds.java
/**
     * {@inheritDoc}
     */
protected boolean putToDatabase(final Chunk chunk, final int resnum) {
    final InputStream aiffStream = new MemoryInputStream(chunk.getMemory(), 0, chunk.getSize() + Chunk.CHUNK_HEADER_LENGTH);
    try {
        final AudioFileFormat aiffFormat = AudioSystem.getAudioFileFormat(aiffStream);
        final AudioInputStream stream = new AudioInputStream(aiffStream, aiffFormat.getFormat(), (long) chunk.getSize());
        final Clip clip = AudioSystem.getClip();
        clip.open(stream);
        sounds.put(resnum, new DefaultSoundEffect(clip));
        return true;
    } catch (Exception ex) {
        ex.printStackTrace();
    }
    return false;
}
Example 16
Project: Atlas-Game-Framework-master  File: Content.java
/**
	 *
	 * @todo Write documentation
	 * @param <T>
	 * @param type
	 * @param url
	 * @return
	 * @since 0.2
	 */
public static <T> T load(Class<T> type, String url) {
    try {
        if (type == Texture2D.class) {
            BufferedImage image = ImageIO.read(new File(url));
            Texture2D texture = new Texture2D(image.getWidth(), image.getHeight(), image.getType());
            texture.setData(image.getData());
            return type.cast(texture);
        }
        if (type == Song.class) {
            File file = new File(url);
            AudioFileFormat format = AudioSystem.getAudioFileFormat(file);
            String album = format.properties().get("album").toString();
            String author = format.properties().get("author").toString();
            TimeSpan duration = new TimeSpan((long) format.properties().get("duration") / 1000);
            String genre = format.properties().get("mp3.id3tag.genre").toString();
            String title = format.properties().get("title").toString();
            String track = format.properties().get("mp3.id3tag.track").toString();
            Song song = new Song(album, author, duration, genre, title, track, file);
            return type.cast(song);
        }
    } catch (IOException | UnsupportedAudioFileException exception) {
        Logger.getLogger(Content.class.getName()).log(Level.SEVERE, null, exception);
    }
    return null;
}
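The property lookups above call toString() on values that are null whenever a tag is missing, so a file without full ID3 data fails with a NullPointerException. A defensive variant of the lookup, sketched with an invented helper name:

import java.util.Map;

// Read an optional tag from the provider's property map without risking
// a NullPointerException when the tag is absent.
static String tag(Map<String, Object> properties, String key, String fallback) {
    Object value = properties.get(key);
    return value != null ? value.toString() : fallback;
}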
Example 17
Project: bigbluebutton-master  File: AudioSender.java
// ******************************* MAIN *******************************
/** The main method. */
public static void main(String[] args) {
    String daddr = null;
    int dport = 0;
    int payload_type = 0;
    int tone_freq = 500;
    double tone_amp = 1.0;
    int sample_rate = 8000;
    int sample_size = 1;
    int frame_size = 500;
    // frame_rate = sample_rate / (frame_size / sample_size);
    int frame_rate;
    // byte_rate = frame_rate * frame_size = 8000
    boolean linear_signed = false;
    boolean pcmu = false;
    boolean big_endian = false;
    String filename = null;
    boolean sound = true;
    boolean help = true;
    for (int i = 0; i < args.length; i++) {
        if (args[i].equals("-h")) {
            break;
        }
        if (i == 0 && args.length > 1) {
            daddr = args[i];
            dport = Integer.parseInt(args[++i]);
            help = false;
            continue;
        }
        if (args[i].equals("-p") && args.length > (i + 1)) {
            payload_type = Integer.parseInt(args[++i]);
            continue;
        }
        if (args[i].equals("-F") && args.length > (i + 1)) {
            sound = false;
            filename = args[++i];
            continue;
        }
        if (args[i].equals("-T") && args.length > (i + 1)) {
            sound = false;
            tone_freq = Integer.parseInt(args[++i]);
            continue;
        }
        if (args[i].equals("-A") && args.length > (i + 1)) {
            tone_amp = Double.parseDouble(args[++i]);
            continue;
        }
        if (args[i].equals("-S") && args.length > (i + 2)) {
            sample_rate = Integer.parseInt(args[++i]);
            sample_size = Integer.parseInt(args[++i]);
            continue;
        }
        if (args[i].equals("-L") && args.length > (i + 1)) {
            frame_size = Integer.parseInt(args[++i]);
            continue;
        }
        if (args[i].equals("-Z")) {
            linear_signed = true;
            continue;
        }
        if (args[i].equals("-U")) {
            pcmu = true;
            continue;
        }
        if (args[i].equals("-E")) {
            big_endian = true;
            continue;
        }
        // else, do:
        System.out.println("unrecognized param '" + args[i] + "'\n");
        help = true;
    }
    if (help) {
        System.out.println("usage:\n  java AudioSender <dest_addr> <dest_port> [options]");
        System.out.println("   options:");
        System.out.println("   -h               this help");
        System.out.println("   -p <type>        payload type");
        System.out.println("   -F <audio_file>  sends an audio file");
        System.out.println("   -T <frequency>   sends a tone of given frequency [Hz]");
        System.out.println("   -A <amplitude>   sets an amplitude factor [0:1]");
        System.out.println("   -S <rate> <size> sample rate [B/s], and size [B]");
        System.out.println("   -L <size>        frame size");
        System.out.println("   -Z               uses PCM linear signed format (linear unsigned is used as default)");
        System.out.println("   -U               uses PCMU format");
        System.out.println("   -E               uses big endian format");
        System.exit(0);
    }
    frame_rate = sample_rate / (frame_size / sample_size);
    AudioFormat.Encoding codec;
    if (pcmu)
        codec = AudioFormat.Encoding.ULAW;
    else if (linear_signed)
        codec = AudioFormat.Encoding.PCM_SIGNED;
    else
        // default
        codec = AudioFormat.Encoding.PCM_UNSIGNED;
    int tone_codec = ToneInputStream.PCM_LINEAR_UNSIGNED;
    if (linear_signed)
        tone_codec = ToneInputStream.PCM_LINEAR_SIGNED;
    try {
        RtpStreamSender sender;
        AudioInput audio_input = null;
        if (sound)
            AudioInput.initAudioLine();
        if (sound) {
            AudioFormat format = new AudioFormat(codec, sample_rate, 8 * sample_size, 1, sample_size, sample_rate, big_endian);
            System.out.println("System audio format: " + format);
            audio_input = new AudioInput(format);
            sender = new RtpStreamSender(audio_input.getInputStream(), false, payload_type, frame_rate, frame_size, daddr, dport);
        } else if (filename != null) {
            File file = new File(filename);
            if (filename.indexOf(".wav") > 0) {
                AudioFileFormat format = AudioSystem.getAudioFileFormat(file);
                System.out.println("File audio format: " + format);
                AudioInputStream audio_input_stream = AudioSystem.getAudioInputStream(file);
                sender = new RtpStreamSender(audio_input_stream, true, payload_type, frame_rate, frame_size, daddr, dport);
            } else {
                FileInputStream input_stream = new FileInputStream(file);
                sender = new RtpStreamSender(input_stream, true, payload_type, frame_rate, frame_size, daddr, dport);
            }
        } else {
            ToneInputStream tone = new ToneInputStream(tone_freq, tone_amp, sample_rate, sample_size, tone_codec, big_endian);
            sender = new RtpStreamSender(tone, true, payload_type, frame_rate, frame_size, daddr, dport);
        }
        if (sender != null) {
            sender.start();
            if (sound)
                audio_input.play();
            System.out.println("Press 'Return' to stop");
            System.in.read();
            sender.halt();
            if (sound)
                audio_input.stop();
            if (sound)
                AudioInput.closeAudioLine();
        } else {
            System.out.println("Error creating the rtp stream.");
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 18
Project: mobicents-master  File: RecorderImpl.java
/**
     * Initializes Recorder.
     * 
     * This method is called when the first packet arrives and the format becomes known.
     * 
     * @param fmt the format of the first arrived packet.
     */
private void openRecorderLine(AudioFormat fmt) {
    float sampleRate = (float) fmt.getSampleRate();
    int sampleSizeInBits = fmt.getSampleSizeInBits();
    int channels = fmt.getChannels();
    boolean bigEndian = fmt.getEndian() == 1;
    Encoding encoding = getEncoding(fmt.getEncoding());
    // int frameSize = (channels == AudioSystem.NOT_SPECIFIED || sampleSizeInBits == AudioSystem.NOT_SPECIFIED) ?
    // AudioSystem.NOT_SPECIFIED
    // : ((sampleSizeInBits + 7) / 8) * channels;
    format = new javax.sound.sampled.AudioFormat(encoding, sampleRate, sampleSizeInBits, channels, 1, sampleRate, bigEndian);
    //assign file type
    if (encoding == SpeexEncoding.SPEEX) {
        fileType = SPEEX;
    } else {
        fileType = AudioFileFormat.Type.WAVE;
    }
    recorderCache = new RecorderCache();
}
Example 19
Project: openjdk8-jdk-master  File: WaveFileWriter.java
// METHODS TO IMPLEMENT AudioFileWriter
public AudioFileFormat.Type[] getAudioFileTypes(AudioInputStream stream) {
    AudioFileFormat.Type[] filetypes = new AudioFileFormat.Type[types.length];
    System.arraycopy(types, 0, filetypes, 0, types.length);
    // make sure we can write this stream
    AudioFormat format = stream.getFormat();
    AudioFormat.Encoding encoding = format.getEncoding();
    if (AudioFormat.Encoding.ALAW.equals(encoding) || AudioFormat.Encoding.ULAW.equals(encoding) || AudioFormat.Encoding.PCM_SIGNED.equals(encoding) || AudioFormat.Encoding.PCM_UNSIGNED.equals(encoding)) {
        return filetypes;
    }
    return new AudioFileFormat.Type[0];
}
Example 20
Project: QuiltPlayer-master  File: Mp3SpiId3Extractor.java
/**
     * @param file
     *            the file to extract from.
     * @return Id3DataModel
     */
private void extract(final File file) {
    String albumTitle = null;
    String artistName = null;
    String songTitle = null;
    String trackNumber = null;
    Long duration = null;
    // Extract information from ID3-tag
    try {
        AudioFileFormat baseFileFormat = AudioSystem.getAudioFileFormat(file);
        if (baseFileFormat instanceof TAudioFileFormat) {
            Map<String, ?> properties = ((TAudioFileFormat) baseFileFormat).properties();
            albumTitle = (String) properties.get("album");
            songTitle = (String) properties.get("title");
            artistName = (String) properties.get("author");
            trackNumber = (String) properties.get("mp3.id3tag.track");
            duration = (Long) properties.get("duration");
        }
        Id3DataModel model = new Id3DataModel();
        model.setAlbumTitle(albumTitle);
        model.setArtistName(artistName);
        model.setSongTitle(songTitle);
        if (trackNumber != null) {
            String fileName = file.getName();
            Matcher m = p.matcher(fileName);
            boolean b = false;
            while (m.find()) {
                model.setTrackNumber(Integer.parseInt(m.group()));
                b = true;
                log.debug("Setting song number to " + m.group());
            }
            if (!b)
                model.setTrackNumber(Integer.parseInt(trackNumber));
        } else {
            /* No good, check the file name if it starts with a number */
            String fileName = file.getName();
            Matcher m = p.matcher(fileName);
            while (m.find()) {
                model.setTrackNumber(Integer.parseInt(m.group()));
                log.debug("Setting song number to " + m.group());
            }
        }
        model.setPath(file.getAbsoluteFile());
        model.setDuration(duration.intValue());
        if (storage != null) {
            storage.store(model);
        }
    } catch (Exception e) {
        unsuccessfull.add(file);
        log.error(e.getMessage());
        e.printStackTrace();
    }
}
Example 21
Project: Soen6471Frinika-master  File: MODImporter.java
private File getSampleFile(Sample sample) {
    if (sample == null)
        return null;
    if (sample.getData() == null)
        return null;
    if (sample.getData().length == 0)
        return null;
    File file = samplestempfiles.get(sample);
    if (file != null)
        return file;
    try {
        file = File.createTempFile("sample", ".wav");
    } catch (IOException e) {
        e.printStackTrace();
        return null;
    }
    short[] data = sample.getData();
    ShortInputStream is = new ShortInputStream(data);
    float rate = (float) sample.getUnits().note2rate(36 + sample.getFineTune() + sample.getRelativeNote());
    AudioFormat format = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, rate, 16, 1, 2, rate * 2, false);
    AudioInputStream audio_inputstream = new AudioInputStream(is, format, data.length * 2);
    try {
        AudioSystem.write(audio_inputstream, AudioFileFormat.Type.WAVE, file);
    } catch (IOException e) {
        e.printStackTrace();
        if (file.exists())
            file.delete();
        return null;
    }
    // Temp file should be deleted on frinika exit!!!
    file.deleteOnExit();
    samplestempfiles.put(sample, file);
    return file;
}
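The ShortInputStream used above is project-specific; the same result is possible with core classes only, by flattening the samples to little-endian bytes first. A sketch, assuming 16-bit signed mono data:

import java.io.ByteArrayInputStream;
import java.io.File;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

// Write 16-bit signed mono samples as a WAV file.
static void writeWav(short[] samples, float rate, File out) throws Exception {
    byte[] bytes = new byte[samples.length * 2];
    for (int i = 0; i < samples.length; i++) {
        bytes[2 * i] = (byte) (samples[i] & 0xFF);            // low byte first
        bytes[2 * i + 1] = (byte) ((samples[i] >> 8) & 0xFF); // little-endian
    }
    AudioFormat format = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, rate, 16, 1, 2, rate, false);
    AudioInputStream ais = new AudioInputStream(new ByteArrayInputStream(bytes), format, samples.length);
    AudioSystem.write(ais, AudioFileFormat.Type.WAVE, out);
}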
Example 22
Project: sos-dendrogram-master  File: DecoderThread.java
@Override
public void run() {
    // is it currently being processed?
    if (currentlyDecoding.containsKey(encodedFile)) {
        Logger.getLogger("at.tuwien.ifs.somtoolbox.multichannel").finer("Already in process, waiting: " + encodedFile);
        // only wait for other thread to finish
        while (currentlyDecoding.containsKey(encodedFile)) {
            try {
                sleep(10000);
            } catch (InterruptedException ex1) {
            }
        }
        // finished, give msg to nodethread
        playbackThread.decodingFinished(pcmFile, channel, stats, this);
    } else {
        // do it the normal way
        AudioInputStream ais = null;
        try {
            // Put into hashmap
            currentlyDecoding.put(encodedFile, this);
            ais = AudioSystem.getAudioInputStream(encodedFile);
            ais = AudioSystem.getAudioInputStream(Constants.DATALINE_FORMAT, ais);
            AudioFormat.Encoding targetEncoding = AudioFormat.Encoding.PCM_SIGNED;
            AudioInputStream pcmAIS = AudioSystem.getAudioInputStream(targetEncoding, ais);
            AudioFileFormat.Type fileType = AudioFileFormat.Type.AU;
            AudioSystem.write(pcmAIS, fileType, pcmFile);
            Thread.sleep(300);
            // finished, give msg to nodethread
            playbackThread.decodingFinished(pcmFile, channel, stats, this);
        } catch (ThreadDeath td) {
            System.out.println("Da haben wir aber noch mal Glück gehabt");
            throw td;
        } catch (Exception ex) {
            ex.printStackTrace();
            playbackThread.decodingFailed(channel, stats);
            Logger.getLogger("at.tuwien.ifs.somtoolbox.multichannel").warning(ex.getMessage());
        } finally {
            currentlyDecoding.remove(encodedFile);
        }
    }
}
Example 23
Project: Sphinx-master  File: WavWriter.java
/**
    * Writes the current stream to disc; override this method if you want to take 
    * additional action on file writes
    *
    * @param wavName name of the file to be written
    */
protected void writeFile(String wavName) {
    AudioFormat wavFormat = new AudioFormat(sampleRate, bitsPerSample, 1, isSigned, true);
    AudioFileFormat.Type outputType = getTargetType("wav");
    byte[] abAudioData = baos.toByteArray();
    ByteArrayInputStream bais = new ByteArrayInputStream(abAudioData);
    AudioInputStream ais = new AudioInputStream(bais, wavFormat, abAudioData.length / wavFormat.getFrameSize());
    File outWavFile = new File(wavName);
    if (AudioSystem.isFileTypeSupported(outputType, ais)) {
        try {
            AudioSystem.write(ais, outputType, outWavFile);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
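The getTargetType helper is not part of this snippet; it presumably maps a file-type extension such as "wav" onto one of the Types the installed writers support, roughly like this sketch:

import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioSystem;

// Presumed shape of getTargetType: match an extension against the file
// types that the system's writers can produce.
static AudioFileFormat.Type getTargetType(String extension) {
    for (AudioFileFormat.Type type : AudioSystem.getAudioFileTypes()) {
        if (type.getExtension().equals(extension)) {
            return type;
        }
    }
    return null;
}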
Example 24
Project: Triana-master  File: WriteAiff_old.java
/*    public void setUpGUI(){

        JFileChooser chooser = new JFileChooser();
        chooser.setDialogTitle("Save Audio to File");

        if (chooser.showOpenDialog(null) == JFileChooser.APPROVE_OPTION) {
            String fn = chooser.getSelectedFile().getAbsolutePath();
            userScreen(chooser.getSelectedFile().getName());
                    updateParameter("fileName", fn);
                    lastDir = chooser.getSelectedFile().getPath();
                }
    }
 */
/**
     * This method sets up the output stream, and is only called if there is a change in the
     * format of the incoming audio. The dataOutputStream is started; it is written to on the
     * fly in the AudioWriter class.
     */
public void setUpWriter(AudioFormat audioFormat) {
    System.out.println("Setting Up Writer...");
    if (fileName.endsWith(".aiff") || fileName.endsWith(".aif")) {
        //(formatType = WAV)){
        audioFileFormat = AudioFileFormat.Type.AIFF;
        fileName2 = fileName;
    } else {
        audioFileFormat = AudioFileFormat.Type.AIFF;
        fileName2 = fileName + ".aiff";
    }
    outputFile = new File(fileName2);
    //Creates new audio format object
    outputFormat = new AudioFormat(audioFormat.getEncoding(), audioFormat.getSampleRate(), audioFormat.getSampleSizeInBits(), audioFormat.getChannels(), audioFormat.getFrameSize(), audioFormat.getFrameRate(), audioFormat.isBigEndian());
    System.out.println("In WRITE : Format.. " + outputFormat);
    System.out.println("Frame size = " + outputFormat.getFrameSize());
    System.out.println("Frame Rate = " + outputFormat.getFrameRate());
    long lLengthInBytes = AudioSystem.NOT_SPECIFIED;
    try {
        dataOutputStream = AudioSystemShadow.getDataOutputStream(outputFile);
    } catch (IOException e) {
        e.printStackTrace();
    }
    audioFileFormat = AudioFileFormat.Type.AIFF;
    audioOutputStream = AudioSystemShadow.getAudioOutputStream(audioFileFormat, outputFormat, lLengthInBytes, dataOutputStream);
// This uses the DataLine.Info subclass to obtain and open a target data line
//    DataLine.Info info = new DataLine.Info(TargetDataLine.class, outputFormat);
//     try {
//       outputChannel = ((TargetDataLine) AudioSystem.getLine(info));
//        outputChannel.open(outputFormat);
//        outputChannel.start();
//    }
//    catch (LineUnavailableException e) {
//        System.err.println("ERROR!! line not supported : " + audioFormat);
//    }
}
Example 25
Project: AdvancedSoundSystemLibrary-master  File: SpeexAudioFileReader.java
/**
   * Return the AudioFileFormat from the given InputStream. Implementation.
   * @param bitStream
   * @param baos
   * @param mediaLength
   * @return an AudioFileFormat object describing the audio file format of the
   * data contained in the input stream.
   * @exception UnsupportedAudioFileException if the File does not point to
   * valid audio file data recognized by the system.
   * @exception IOException if an I/O exception occurs.
   */
protected AudioFileFormat getAudioFileFormat(final InputStream bitStream, ByteArrayOutputStream baos, final int mediaLength) throws UnsupportedAudioFileException, IOException {
    AudioFormat format;
    try {
        // mark the stream so it can be reset to the beginning, allowing other providers to attempt to read it if this one fails.
        if (bitStream.markSupported()) {
            // maximum number of bytes to determine the stream encoding:
            // Size of 1st Ogg Packet (Speex header) = OGG_HEADERSIZE + SPEEX_HEADERSIZE + 1
            // Size of 2nd Ogg Packet (Comment)      = OGG_HEADERSIZE + comment_size + 1
            // Size of 3rd Ogg Header (First data)   = OGG_HEADERSIZE + number_of_frames
            // where number_of_frames < 256 and comment_size < 256 (if within 1 frame)
            bitStream.mark(3 * OGG_HEADERSIZE + SPEEX_HEADERSIZE + 256 + 256 + 2);
        }
        int mode = -1;
        int sampleRate = 0;
        int channels = 0;
        int frameSize = AudioSystem.NOT_SPECIFIED;
        float frameRate = AudioSystem.NOT_SPECIFIED;
        byte[] header = new byte[128];
        int segments = 0;
        int bodybytes = 0;
        DataInputStream dis = new DataInputStream(bitStream);
        if (baos == null)
            baos = new ByteArrayOutputStream(128);
        int origchksum;
        int chksum;
        // read the OGG header
        dis.readFully(header, 0, OGG_HEADERSIZE);
        baos.write(header, 0, OGG_HEADERSIZE);
        origchksum = readInt(header, 22);
        header[22] = 0;
        header[23] = 0;
        header[24] = 0;
        header[25] = 0;
        chksum = OggCrc.checksum(0, header, 0, OGG_HEADERSIZE);
        // make sure it's an OGG header
        if (!OGGID.equals(new String(header, 0, 4))) {
            throw new UnsupportedAudioFileException("missing ogg id!");
        }
        // how many segments are there?
        segments = header[SEGOFFSET] & 0xFF;
        if (segments > 1) {
            throw new UnsupportedAudioFileException("Corrupt Speex Header: more than 1 segments");
        }
        dis.readFully(header, OGG_HEADERSIZE, segments);
        baos.write(header, OGG_HEADERSIZE, segments);
        chksum = OggCrc.checksum(chksum, header, OGG_HEADERSIZE, segments);
        // get the number of bytes in the segment
        bodybytes = header[OGG_HEADERSIZE] & 0xFF;
        if (bodybytes != SPEEX_HEADERSIZE) {
            throw new UnsupportedAudioFileException("Corrupt Speex Header: size=" + bodybytes);
        }
        // read the Speex header
        dis.readFully(header, OGG_HEADERSIZE + 1, bodybytes);
        baos.write(header, OGG_HEADERSIZE + 1, bodybytes);
        chksum = OggCrc.checksum(chksum, header, OGG_HEADERSIZE + 1, bodybytes);
        // make sure it's a Speex header
        if (!SPEEXID.equals(new String(header, OGG_HEADERSIZE + 1, 8))) {
            throw new UnsupportedAudioFileException("Corrupt Speex Header: missing Speex ID");
        }
        mode = readInt(header, OGG_HEADERSIZE + 1 + 40);
        sampleRate = readInt(header, OGG_HEADERSIZE + 1 + 36);
        channels = readInt(header, OGG_HEADERSIZE + 1 + 48);
        int nframes = readInt(header, OGG_HEADERSIZE + 1 + 64);
        boolean vbr = readInt(header, OGG_HEADERSIZE + 1 + 60) == 1;
        // Checksum
        if (chksum != origchksum)
            throw new IOException("Ogg CheckSums do not match");
        // Calculate frameSize
        if (!vbr) {
        // Frames size is a constant so:
        // Read Comment Packet the Ogg Header of 1st data packet;
        // the array table_segment repeats the frame size over and over.
        }
        // Calculate frameRate
        if (mode >= 0 && mode <= 2 && nframes > 0) {
            frameRate = ((float) sampleRate) / ((mode == 0 ? 160f : (mode == 1 ? 320f : 640f)) * ((float) nframes));
        }
        format = new AudioFormat(SpeexEncoding.SPEEX, (float) sampleRate, AudioSystem.NOT_SPECIFIED, channels, frameSize, frameRate, false);
    } catch (UnsupportedAudioFileException e) {
        if (bitStream.markSupported()) {
            bitStream.reset();
        }
        throw e;
    } catch (IOException ioe) {
        if (bitStream.markSupported()) {
            bitStream.reset();
        }
        throw new UnsupportedAudioFileException(ioe.getMessage());
    }
    return new AudioFileFormat(SpeexFileFormatType.SPEEX, format, AudioSystem.NOT_SPECIFIED);
}
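The readInt helper called throughout this parser is not shown in the snippet; since Ogg header fields are little-endian, it presumably reads a 32-bit little-endian value from the buffer, along these lines:

// Presumed little-endian 32-bit read used by the header parser above.
private static int readInt(byte[] data, int offset) {
    return (data[offset] & 0xFF) | ((data[offset + 1] & 0xFF) << 8) | ((data[offset + 2] & 0xFF) << 16) | (data[offset + 3] << 24);
}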
Example 34
Project: festivoice-master  File: SpeexAudioFileReader.java View source code
/**
   * Return the AudioFileFormat from the given InputStream. Implementation.
   * @param bitStream
   * @param baos
   * @param mediaLength
   * @return an AudioInputStream object based on the audio file data contained
   * in the input stream.
   * @exception UnsupportedAudioFileException if the File does not point to
   * a valid audio file data recognized by the system.
   * @exception IOException if an I/O exception occurs.
   */
protected AudioFileFormat getAudioFileFormat(final InputStream bitStream, ByteArrayOutputStream baos, final int mediaLength) throws UnsupportedAudioFileException, IOException {
    AudioFormat format;
    try {
        // beginning so other providers can attempt to read the stream.
        if (bitStream.markSupported()) {
            // maximum number of bytes to determine the stream encoding:
            // Size of 1st Ogg Packet (Speex header) = OGG_HEADERSIZE + SPEEX_HEADERSIZE + 1
            // Size of 2nd Ogg Packet (Comment)      = OGG_HEADERSIZE + comment_size + 1
            // Size of 3rd Ogg Header (First data)   = OGG_HEADERSIZE + number_of_frames
            // where number_of_frames < 256 and comment_size < 256 (if within 1 frame)
            bitStream.mark(3 * OGG_HEADERSIZE + SPEEX_HEADERSIZE + 256 + 256 + 2);
        }
        int mode = -1;
        int sampleRate = 0;
        int channels = 0;
        int frameSize = AudioSystem.NOT_SPECIFIED;
        float frameRate = AudioSystem.NOT_SPECIFIED;
        byte[] header = new byte[128];
        int segments = 0;
        int bodybytes = 0;
        DataInputStream dis = new DataInputStream(bitStream);
        if (baos == null)
            baos = new ByteArrayOutputStream(128);
        int origchksum;
        int chksum;
        // read the OGG header
        dis.readFully(header, 0, OGG_HEADERSIZE);
        baos.write(header, 0, OGG_HEADERSIZE);
        origchksum = readInt(header, 22);
        header[22] = 0;
        header[23] = 0;
        header[24] = 0;
        header[25] = 0;
        chksum = OggCrc.checksum(0, header, 0, OGG_HEADERSIZE);
        // make sure its a OGG header
        if (!OGGID.equals(new String(header, 0, 4))) {
            throw new UnsupportedAudioFileException("missing ogg id!");
        }
        // how many segments are there?
        segments = header[SEGOFFSET] & 0xFF;
        if (segments > 1) {
            throw new UnsupportedAudioFileException("Corrupt Speex Header: more than 1 segments");
        }
        dis.readFully(header, OGG_HEADERSIZE, segments);
        baos.write(header, OGG_HEADERSIZE, segments);
        chksum = OggCrc.checksum(chksum, header, OGG_HEADERSIZE, segments);
        // get the number of bytes in the segment
        bodybytes = header[OGG_HEADERSIZE] & 0xFF;
        if (bodybytes != SPEEX_HEADERSIZE) {
            throw new UnsupportedAudioFileException("Corrupt Speex Header: size=" + bodybytes);
        }
        // read the Speex header
        dis.readFully(header, OGG_HEADERSIZE + 1, bodybytes);
        baos.write(header, OGG_HEADERSIZE + 1, bodybytes);
        chksum = OggCrc.checksum(chksum, header, OGG_HEADERSIZE + 1, bodybytes);
        // make sure its a Speex header
        if (!SPEEXID.equals(new String(header, OGG_HEADERSIZE + 1, 8))) {
            throw new UnsupportedAudioFileException("Corrupt Speex Header: missing Speex ID");
        }
        mode = readInt(header, OGG_HEADERSIZE + 1 + 40);
        sampleRate = readInt(header, OGG_HEADERSIZE + 1 + 36);
        channels = readInt(header, OGG_HEADERSIZE + 1 + 48);
        int nframes = readInt(header, OGG_HEADERSIZE + 1 + 64);
        boolean vbr = readInt(header, OGG_HEADERSIZE + 1 + 60) == 1;
        // Checksum
        if (chksum != origchksum)
            throw new IOException("Ogg CheckSums do not match");
        // Calculate frameSize
        if (!vbr) {
        // Frames size is a constant so:
        // Read Comment Packet the Ogg Header of 1st data packet;
        // the array table_segment repeats the frame size over and over.
        }
        // Calculate frameRate
        if (mode >= 0 && mode <= 2 && nframes > 0) {
            frameRate = ((float) sampleRate) / ((mode == 0 ? 160f : (mode == 1 ? 320f : 640f)) * ((float) nframes));
        }
        format = new AudioFormat(SpeexEncoding.SPEEX, (float) sampleRate, AudioSystem.NOT_SPECIFIED, channels, frameSize, frameRate, false);
    } catch (UnsupportedAudioFileException e) {
        if (bitStream.markSupported()) {
            bitStream.reset();
        }
        throw e;
    } catch (IOException ioe) {
        if (bitStream.markSupported()) {
            bitStream.reset();
        }
        throw new UnsupportedAudioFileException(ioe.getMessage());
    }
    return new AudioFileFormat(SpeexFileFormatType.SPEEX, format, AudioSystem.NOT_SPECIFIED);
}
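The readInt helper used throughout the Speex header parsing above is not shown in this listing. The Ogg and Speex header fields it decodes are little-endian, so a matching implementation would look roughly like the following sketch (an assumption inferred from the byte offsets used above, not the project's actual code):

private static int readInt(final byte[] data, final int offset) {
    // Little-endian: least significant byte first.
    return (data[offset] & 0xFF)
            | ((data[offset + 1] & 0xFF) << 8)
            | ((data[offset + 2] & 0xFF) << 16)
            | (data[offset + 3] << 24);
}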
Example 35
Project: computoser-master  File: Midi2WavRenderer.java View source code
/**
 * Render sequence using selected or default soundbank into a wave audio file.
 */
public static void render(Sequence sequence, OutputStream outStream) {
    try {
        // Find available AudioSynthesizer.
        AudioSynthesizer synth = findAudioSynthesizer();
        if (synth == null) {
            logger.warn("No AudioSynhtesizer was found!");
            return;
        }
        // Open AudioStream from AudioSynthesizer.
        AudioInputStream stream = synth.openStream(null, null);
        Generator.loadSoundbankInstruments(synth);
        // Play Sequence into AudioSynthesizer Receiver.
        double total = send(sequence, synth.getReceiver());
        // Calculate how long the WAVE file needs to be.
        long len = (long) (stream.getFormat().getFrameRate() * (total + 4));
        stream = new AudioInputStream(stream, stream.getFormat(), len);
        AudioSystem.write(stream, AudioFileFormat.Type.WAVE, outStream);
        synth.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
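A minimal caller sketch for this renderer: load a Sequence with MidiSystem and pass any OutputStream to receive the WAVE data. The file names here are hypothetical.

import java.io.File;
import java.io.FileOutputStream;
import javax.sound.midi.MidiSystem;
import javax.sound.midi.Sequence;

public class RenderDemo {
    public static void main(String[] args) throws Exception {
        // Load a MIDI sequence from disk and render it to a WAVE file.
        Sequence sequence = MidiSystem.getSequence(new File("song.mid"));
        try (FileOutputStream out = new FileOutputStream("song.wav")) {
            Midi2WavRenderer.render(sequence, out);
        }
    }
}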
Example 36
Project: couch-audio-recorder-master  File: SimpleAudioRecorder.java View source code
@Override
public synchronized void startRecording(String recordingId, String mixer, float gain) throws LineUnavailableException {
    if (isRecording || m_line != null) {
        throw new LineUnavailableException();
    }
    AudioFormat audioFormat = new AudioFormat(16000.0F, 16, 1, true, true);
    audioFormat = getBestAudioFormat(audioFormat, mixer);
    DataLine.Info info = new DataLine.Info(TargetDataLine.class, audioFormat);
    Mixer selectedMixer = getSelectedMixer(mixer);
    if (selectedMixer == null) {
        m_line = (TargetDataLine) AudioSystem.getLine(info);
        selectedMixer = AudioSystem.getMixer(null);
    } else {
        m_line = (TargetDataLine) selectedMixer.getLine(info);
    }
    m_line.open(audioFormat);
    //FloatControl fc = (FloatControl) selectedMixer.getControl(FloatControl.Type.MASTER_GAIN);
    //System.out.println("Master Gain min: " + fc.getMinimum());
    //System.out.println("Master Gain max: " + fc.getMaximum());
    //System.out.println("Master Gain cur: " + fc.getValue());
    //fc.setValue(MIN_PRIORITY);
    AudioFileFormat.Type targetType = AudioFileFormat.Type.WAVE;
    m_audioInputStream = new AudioInputStream(m_line);
    m_targetType = targetType;
    m_outputFile = new File(root, recordingId + ".wav");
    isRecording = true;
    recordingStart = System.currentTimeMillis();
    new Recorder().start();
}
Example 37
Project: etyllica-master  File: VorbisAudioFileReader.java View source code
private AudioFileFormat getAudioFileFormat(PhysicalOggStream oggStream) throws IOException, UnsupportedAudioFileException {
    try {
        Collection streams = oggStream.getLogicalStreams();
        if (streams.size() != 1) {
            throw new UnsupportedAudioFileException("Only Ogg files with one logical Vorbis stream are supported.");
        }
        LogicalOggStream los = (LogicalOggStream) streams.iterator().next();
        if (los.getFormat() != LogicalOggStream.FORMAT_VORBIS) {
            throw new UnsupportedAudioFileException("Only Ogg files with one logical Vorbis stream are supported.");
        }
        VorbisStream vs = new VorbisStream(los);
        AudioFormat audioFormat = new AudioFormat((float) vs.getIdentificationHeader().getSampleRate(), 16, vs.getIdentificationHeader().getChannels(), true, true);
        return new AudioFileFormat(VorbisFormatType.getInstance(), audioFormat, AudioSystem.NOT_SPECIFIED);
    } catch (OggFormatException e) {
        throw new UnsupportedAudioFileException(e.getMessage());
    } catch (VorbisFormatException e) {
        throw new UnsupportedAudioFileException(e.getMessage());
    }
}
Example 38
Project: JMediaPlayer-master  File: MpegAudioFileReader.java View source code
/**
     * Returns AudioFileFormat from URL.
     */
public AudioFileFormat getAudioFileFormat(URL url) throws UnsupportedAudioFileException, IOException {
    if (TDebug.TraceAudioFileReader) {
        TDebug.out("MpegAudioFileReader.getAudioFileFormat(URL): begin");
    }
    long lFileLengthInBytes = AudioSystem.NOT_SPECIFIED;
    URLConnection conn = url.openConnection();
    // Tell the shoutcast server (if any) that this SPI supports shoutcast streams.
    conn.setRequestProperty("Icy-Metadata", "1");
    InputStream inputStream = conn.getInputStream();
    AudioFileFormat audioFileFormat = null;
    try {
        audioFileFormat = getAudioFileFormat(inputStream, lFileLengthInBytes);
    } finally {
        inputStream.close();
    }
    if (TDebug.TraceAudioFileReader) {
        TDebug.out("MpegAudioFileReader.getAudioFileFormat(URL): end");
    }
    return audioFileFormat;
}
Example 39
Project: jukefox-master  File: MpegAudioFileReader.java View source code
/**
     * Returns AudioFileFormat from URL.
     */
public AudioFileFormat getAudioFileFormat(URL url) throws UnsupportedAudioFileException, IOException {
    if (TDebug.TraceAudioFileReader) {
        TDebug.out("MpegAudioFileReader.getAudioFileFormat(URL): begin");
    }
    long lFileLengthInBytes = AudioSystem.NOT_SPECIFIED;
    URLConnection conn = url.openConnection();
    // Tell the shoutcast server (if any) that this SPI supports shoutcast streams.
    conn.setRequestProperty("Icy-Metadata", "1");
    InputStream inputStream = conn.getInputStream();
    AudioFileFormat audioFileFormat = null;
    try {
        audioFileFormat = getAudioFileFormat(inputStream, lFileLengthInBytes);
    } finally {
        inputStream.close();
    }
    if (TDebug.TraceAudioFileReader) {
        TDebug.out("MpegAudioFileReader.getAudioFileFormat(URL): end");
    }
    return audioFileFormat;
}
Example 40
Project: mmlTools-master  File: WavoutDataLine.java View source code
private void wavoutEndCheck(byte[] b, int off, int len) {
    if (!rec && (tempOutputStream != null)) {
        boolean stop = true;
        for (int i = 0; i < len; i++) {
            if (b[i] != 0) {
                stop = false;
                break;
            }
        }
        if (stop) {
            try {
                tempOutputStream.close();
                long size = tempFile.length() / format.getFrameSize();
                AudioInputStream in = new AudioInputStream(new FileInputStream(tempFile), format, size);
                AudioSystem.write(in, AudioFileFormat.Type.WAVE, outputStream);
                in.close();
                tempFile.delete();
                System.out.println("stopRec: " + size);
            } catch (IOException e) {
                e.printStackTrace();
            }
            tempOutputStream = null;
        }
    }
}
Example 41
Project: reaper--rest-in-peace-master  File: AppletMpegSPIWorkaround.java View source code
public static AudioFileFormat getAudioFileFormat(File file) throws UnsupportedAudioFileException, IOException {
    InputStream inputStream = new BufferedInputStream(new FileInputStream(file));
    try {
        if (DEBUG) {
            System.err.println("Using AppletMpegSPIWorkaround to get codec (AudioFileFormat:file)");
        }
        return getAudioFileFormat(inputStream);
    } finally {
        inputStream.close();
    }
}
Example 42
Project: robonobo-master  File: Mp3FormatSupportProvider.java View source code
public Stream getStreamForFile(File f) throws IOException {
    AudioFileFormat fileFormat;
    try {
        MpegAudioFileReader reader = new MpegAudioFileReader();
        fileFormat = reader.getAudioFileFormat(f);
    //			fileFormat = AudioSystem.getAudioFileFormat(f);
    } catch (UnsupportedAudioFileException e) {
        throw new IOException("File " + f.getAbsolutePath() + " does not appear to be an mp3 file");
    }
    if (!(fileFormat instanceof MpegAudioFileFormat))
        throw new IOException("File " + f.getAbsolutePath() + " does not appear to be an mp3 file");
    Stream s = new Stream();
    s.setMimeType(getMimeType());
    s.setSize(f.length());
    Map<String, Object> props = fileFormat.properties();
    // mp3 duration is in microsecs
    s.setDuration((getLongProp(props, "duration")) / 1000);
    String title = (getStringProp(props, "title")).trim();
    if (title.length() == 0)
        title = getTitleFromFileName(f);
    s.setTitle(title);
    String artist = (getStringProp(props, "author")).trim();
    if (artist.length() == 0)
        artist = "Unknown Artist";
    s.setAttrValue("artist", artist);
    String album = (getStringProp(props, "album")).trim();
    if (album.length() == 0)
        album = "Unknown Album";
    s.setAttrValue("album", album);
    s.setDescription((getStringProp(props, "comment")).trim());
    s.setAttrValue("year", (getStringProp(props, "date")).trim());
    s.setAttrValue("track", (getStringProp(props, "mp3.id3tag.track")).trim());
    return s;
}
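The getLongProp and getStringProp helpers called above are not shown in this listing. Null-safe implementations consistent with how the calls are used (the string variant must never return null, since its results are trimmed immediately; Map here is java.util.Map) might look like this sketch; the project's actual bodies may differ:

private static long getLongProp(Map<String, Object> props, String key) {
    // Default to 0 when the property is absent or not a Long.
    Object value = props.get(key);
    return (value instanceof Long) ? (Long) value : 0L;
}

private static String getStringProp(Map<String, Object> props, String key) {
    // Never return null, so callers can safely call trim().
    Object value = props.get(key);
    return (value == null) ? "" : value.toString();
}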
Example 43
Project: SFXR-Plus-Plus-master  File: SFXRSound.java View source code
/**
	 * Generates a new .wav file of the sound.
	 * @param snd Sound to write
	 * @param pathName File's path
	 * @param name File's name (without .wav)
	 * @throws IOException
	 */
public static void writeToWav(SFXRSound snd, String pathName, String name) throws IOException {
    long length = snd.getPcm().length;
    InputStream bais = new ByteArrayInputStream(snd.getPcm());
    AudioFormat af = new AudioFormat(Encoding.PCM_SIGNED, snd.getSampleRate(), 8, 2, 2, snd.getSampleRate(), false);
    AudioInputStream aisTemp = new AudioInputStream(bais, af, length);
    File fileOut = new File(pathName + name + ".wav");
    AudioFileFormat.Type fileType = AudioFileFormat.Type.WAVE;
    if (AudioSystem.isFileTypeSupported(fileType, aisTemp)) {
        AudioSystem.write(aisTemp, fileType, fileOut);
    }
}
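The same AudioSystem.write pattern works for any raw PCM assembled in memory. A self-contained sketch (the output file name is hypothetical) that renders one second of a 440 Hz tone as 16-bit little-endian mono and saves it as WAVE:

import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

public class SineToWav {
    public static void main(String[] args) throws IOException {
        float sampleRate = 44100f;
        byte[] pcm = new byte[(int) sampleRate * 2]; // 1 second, 16-bit mono
        for (int i = 0; i < pcm.length / 2; i++) {
            short s = (short) (0.5 * Short.MAX_VALUE * Math.sin(2 * Math.PI * 440 * i / sampleRate));
            pcm[2 * i] = (byte) s;            // low byte (little-endian)
            pcm[2 * i + 1] = (byte) (s >> 8); // high byte
        }
        AudioFormat format = new AudioFormat(sampleRate, 16, 1, true, false);
        AudioInputStream ais = new AudioInputStream(new ByteArrayInputStream(pcm), format, pcm.length / format.getFrameSize());
        AudioSystem.write(ais, AudioFileFormat.Type.WAVE, new File("tone.wav"));
    }
}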
Example 44
Project: speechless-master  File: Speeker.java View source code
@Override
public void run() {
    if (!speech.trim().isEmpty()) {
        ToastWindow toast = ToastWindow.showToast(MESSAGES.get("saving"), false);
        try {
            AudioInputStream audio = marytts.generateAudio(speech.toLowerCase());
            AudioSystem.write(audio, AudioFileFormat.Type.WAVE, file);
            toast.setVisible(false);
            toast.dispose();
            ToastWindow.showToast(MESSAGES.get("ready_saving"), true);
        } catch (SynthesisException | IOException e) {
            log.error("Unable to save speech", e);
            toast.setVisible(false);
            toast.dispose();
            SwingUtilities.invokeLater(() -> JOptionPane.showMessageDialog(null, MESSAGES.get("offending_speech"), MESSAGES.get("error"), JOptionPane.ERROR_MESSAGE));
        }
    }
}
Example 45
Project: TaiWebDeployUtils-master  File: MpegAudioFileReader.java View source code
/**
     * Returns AudioFileFormat from URL.
     */
public AudioFileFormat getAudioFileFormat(URL url) throws UnsupportedAudioFileException, IOException {
    if (TDebug.TraceAudioFileReader) {
        TDebug.out("MpegAudioFileReader.getAudioFileFormat(URL): begin");
    }
    long lFileLengthInBytes = AudioSystem.NOT_SPECIFIED;
    URLConnection conn = url.openConnection();
    // Tell the shoutcast server (if any) that this SPI supports shoutcast streams.
    conn.setRequestProperty("Icy-Metadata", "1");
    InputStream inputStream = conn.getInputStream();
    AudioFileFormat audioFileFormat = null;
    try {
        audioFileFormat = getAudioFileFormat(inputStream, lFileLengthInBytes);
    } finally {
        inputStream.close();
    }
    if (TDebug.TraceAudioFileReader) {
        TDebug.out("MpegAudioFileReader.getAudioFileFormat(URL): end");
    }
    return audioFileFormat;
}
Example 46
Project: TarsosDSP-master  File: Resynthesizer.java View source code
private void combineTwoMonoAudioFilesInTwoChannels(String first, String second, String outputFile) throws IOException, UnsupportedAudioFileException, LineUnavailableException {
    AudioInputStream stream = AudioSystem.getAudioInputStream(new File(first));
    final float sampleRate = (int) stream.getFormat().getSampleRate();
    final int numberOfSamples = (int) stream.getFrameLength();
    //2 bytes per sample, stereo (2 channels)
    final byte[] byteBuffer = new byte[numberOfSamples * 2 * 2];
    /*
			 * Read the source file data in the left channel
			 */
    stream = AudioSystem.getAudioInputStream(new File(first));
    byte[] sampleAsByteArray = new byte[2];
    for (int sample = 0; sample < numberOfSamples; sample++) {
        stream.read(sampleAsByteArray);
        byteBuffer[sample * 4 + 0] = sampleAsByteArray[0];
        byteBuffer[sample * 4 + 1] = sampleAsByteArray[1];
    }
    /*
			 * Read the source file data in the right channel
			 */
    stream = AudioSystem.getAudioInputStream(new File(second));
    sampleAsByteArray = new byte[2];
    for (int sample = 0; sample < numberOfSamples; sample++) {
        stream.read(sampleAsByteArray);
        byteBuffer[sample * 4 + 2] = sampleAsByteArray[0];
        byteBuffer[sample * 4 + 3] = sampleAsByteArray[1];
    }
    /*
			 * Write the data to a file.
			 */
    final AudioFormat audioFormat = new AudioFormat(sampleRate, 16, 2, true, false);
    final ByteArrayInputStream bais = new ByteArrayInputStream(byteBuffer);
    final AudioInputStream audioInputStream = new AudioInputStream(bais, audioFormat, numberOfSamples);
    final File out = new File(outputFile);
    AudioSystem.write(audioInputStream, AudioFileFormat.Type.WAVE, out);
    audioInputStream.close();
}
Example 47
Project: warlock2-master  File: RCPUtil.java View source code
public static void playSound(InputStream soundStream) {
    try {
        Clip clip = AudioSystem.getClip();
        BufferedInputStream bufferedStream = new BufferedInputStream(soundStream);
        AudioFileFormat format = AudioSystem.getAudioFileFormat(bufferedStream);
        final Flag finished = new Flag();
        finished.value = false;
        final AudioInputStream stream = new AudioInputStream(bufferedStream, format.getFormat(), format.getFrameLength());
        clip.open(stream);
        clip.addLineListener(new LineListener() {

            public void update(LineEvent event) {
                if (event.getType() == LineEvent.Type.STOP) {
                    try {
                        stream.close();
                        finished.value = true;
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        clip.start();
        while (!finished.value) {
            if (Display.getDefault() != null) {
                Display.getDefault().readAndDispatch();
            } else {
                Thread.sleep((long) 500);
            }
        }
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (LineUnavailableException e) {
        e.printStackTrace();
    } catch (UnsupportedAudioFileException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}
Example 48
Project: xtrememp-swing-master  File: MpegAudioFileReader.java View source code
/**
     * Returns AudioFileFormat from URL.
     */
@Override
public AudioFileFormat getAudioFileFormat(URL url) throws UnsupportedAudioFileException, IOException {
    if (TDebug.TraceAudioFileReader) {
        TDebug.out("MpegAudioFileReader.getAudioFileFormat(URL): begin");
    }
    long lFileLengthInBytes = AudioSystem.NOT_SPECIFIED;
    URLConnection conn = url.openConnection();
    // Tell the shoutcast server (if any) that this SPI supports shoutcast streams.
    conn.setRequestProperty("Icy-Metadata", "1");
    InputStream inputStream = conn.getInputStream();
    AudioFileFormat audioFileFormat = null;
    try {
        audioFileFormat = getAudioFileFormat(inputStream, lFileLengthInBytes);
    } finally {
        inputStream.close();
    }
    if (TDebug.TraceAudioFileReader) {
        TDebug.out("MpegAudioFileReader.getAudioFileFormat(URL): end");
    }
    return audioFileFormat;
}
Example 49
Project: Amber-IDE-master  File: AudioIO.java View source code
public Audio readAudio(InputStream in) throws Exception {
    if (!al) {
        throw new IllegalStateException("cannot get ogg audio: AL context unavailable");
    }
    BufferedInputStream bais = new BufferedInputStream(in);
    int buffer = -1;
    AudioFileFormat format = null;
    try {
        IntBuffer buf = BufferUtils.createIntBuffer(1);
        OggDecoder decoder = new OggDecoder(bais);
        alGenBuffers(buf);
        alBufferData(buf.get(0), decoder.channels > 1 ? AL_FORMAT_STEREO16 : AL_FORMAT_MONO16, decoder.data, decoder.rate);
        OggDecoder.Stream aux = decoder.oggInput;
        buffer = buf.get(0);
        AudioFormat base = new AudioFormat(aux.getRate(), 16, alGetBufferi(buffer, AL_CHANNELS), true, aux.bigEndian);
        format = new AudioFileFormat(new AudioFileFormat.Type("OGG", "ogg"), base, (int) (base.getFrameRate() * (((alGetBufferi(buffer, AL_SIZE) / (alGetBufferi(buffer, AL_BITS) / 8)) / (float) alGetBufferi(buffer, AL_FREQUENCY)) / alGetBufferi(buffer, AL_CHANNELS) / 1000000 + 4)));
    } catch (Exception e) {
        ErrorHandler.alert(e);
    }
    if (buffer == -1) {
        throw new IOException("unable to load: " + in);
    }
    ALAudio ala = new ALAudio(buffer, format.getFormat());
    return ala;
}
Example 50
Project: IndiScene-master  File: KOSTAAudio.java View source code
/**
	 * @name : makeFile
	 * @date : 2015. 7. 6.
	 * @author : Kim Min Sic
	 * @description : merges an audio byte array into an audio File / returns the new audio file's path
	 */
private String makeFile(byte[] byteBuffer, AudioInputStream ais, String userId) throws IOException {
    String timeName = userId + "_" + System.currentTimeMillis();
    //		String outputFilePath = "C:/KMS_MavenSpring/apache-tomcat-7.0.59/wtpwebapps/IndiScene/resources/MergeMusic/"+timeName;
    String outputFilePath = dir + timeName;
    ByteArrayInputStream bais = new ByteArrayInputStream(byteBuffer);
    AudioSystem.write(new AudioInputStream(bais, ais.getFormat(), byteBuffer.length), AudioFileFormat.Type.WAVE, new File(outputFilePath));
    return outputFilePath;
}
Example 51
Project: JamVM-PH-master  File: WAVReader.java View source code
/** Get an AudioFileFormat from the given InputStream.
 * @see javax.sound.sampled.spi.AudioFileReader#getAudioFileFormat(java.io.InputStream)
 */
public AudioFileFormat getAudioFileFormat(InputStream in) throws UnsupportedAudioFileException, IOException {
    DataInputStream din;
    if (in instanceof DataInputStream)
        din = (DataInputStream) in;
    else
        din = new DataInputStream(in);
    if (// "RIFF"
    din.readInt() != 0x52494646)
        throw new UnsupportedAudioFileException("Invalid WAV chunk header.");
    // Read the length of this RIFF thing.
    readUnsignedIntLE(din);
    if (// "WAVE"
    din.readInt() != 0x57415645)
        throw new UnsupportedAudioFileException("Invalid WAV chunk header.");
    boolean foundFmt = false;
    boolean foundData = false;
    short compressionCode = 0, numberChannels = 0, blockAlign = 0, bitsPerSample = 0;
    long sampleRate = 0, bytesPerSecond = 0;
    long chunkLength = 0;
    while (!foundData) {
        int chunkId = din.readInt();
        chunkLength = readUnsignedIntLE(din);
        switch(chunkId) {
            case // "fmt "
            0x666D7420:
                foundFmt = true;
                compressionCode = readUnsignedShortLE(din);
                numberChannels = readUnsignedShortLE(din);
                sampleRate = readUnsignedIntLE(din);
                bytesPerSecond = readUnsignedIntLE(din);
                blockAlign = readUnsignedShortLE(din);
                bitsPerSample = readUnsignedShortLE(din);
                din.skip(chunkLength - 16);
                break;
            case // "fact"
            0x66616374:
                // FIXME: hold compression format dependent data.
                din.skip(chunkLength);
                break;
            case // "data"
            0x64617461:
                if (!foundFmt)
                    throw new UnsupportedAudioFileException("This implementation requires WAV fmt chunks precede data chunks.");
                foundData = true;
                break;
            default:
                // Unrecognized chunk.  Skip it.
                din.skip(chunkLength);
        }
    }
    AudioFormat.Encoding encoding;
    switch(compressionCode) {
        case 1: // PCM/uncompressed
            if (bitsPerSample <= 8)
                encoding = AudioFormat.Encoding.PCM_UNSIGNED;
            else
                encoding = AudioFormat.Encoding.PCM_SIGNED;
            break;
        default:
            throw new UnsupportedAudioFileException("Unrecognized WAV compression code: 0x" + Integer.toHexString(compressionCode));
    }
    return new AudioFileFormat(AudioFileFormat.Type.WAVE, new AudioFormat(encoding, (float) sampleRate, bitsPerSample, numberChannels, ((bitsPerSample + 7) / 8) * numberChannels, (float) bytesPerSecond, false), (int) chunkLength);
}
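To see what such a reader produces without writing one, AudioSystem can be asked directly. A short inspection sketch (the input file name is hypothetical):

import java.io.File;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;

public class InspectWav {
    public static void main(String[] args) throws Exception {
        AudioFileFormat aff = AudioSystem.getAudioFileFormat(new File("input.wav"));
        AudioFormat fmt = aff.getFormat();
        System.out.println("Type:         " + aff.getType());
        System.out.println("Encoding:     " + fmt.getEncoding());
        System.out.println("Sample rate:  " + fmt.getSampleRate());
        System.out.println("Channels:     " + fmt.getChannels());
        System.out.println("Frame length: " + aff.getFrameLength());
    }
}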
Example 52
Project: jersey-master  File: ToneGenerator.java View source code
/**
     * Writes the temporary file with the generated audio.
     *
     * @param inputStream input stream with the waveform
     * @param length      length of the waveform
     * @return name of the generated temporary file
     * @throws IOException
     */
private static String writeWav(InputStream inputStream, int length) throws IOException {
    AudioFormat format = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, SAMPLE_RATE, 8, 1, 1, SAMPLE_RATE, false);
    File file = File.createTempFile("wav", ".");
    AudioSystem.write(new AudioInputStream(inputStream, format, length), AudioFileFormat.Type.WAVE, file);
    return file.getAbsolutePath();
}
Example 53
Project: mpc_tp2-master  File: AudioP.java View source code
/** ExportSample
	 * Exports a sample as mono WAV audio
	 * 
	 * @param fn - filename string
	 * @param sample - short[] of sample to be exported
	 */
public static void ExportSample(String fn, short[] sample) throws Exception {
    System.out.println("Exporting Sample");
    // Standard output format. 16bit, 44.1kHz, mono
    AudioFormat outputFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, (float) 44100.0, 16, 1, 2, 2, false);
    // Convert down to byte[] inputStream
    byte[] sampleBytes = new byte[sample.length * 2];
    ByteBuffer.wrap(sampleBytes).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().put(sample);
    ByteArrayInputStream outputSampleStream = new ByteArrayInputStream(sampleBytes);
    // Output
    File outputAudio = new File(fn);
    AudioInputStream outputAIS = new AudioInputStream(outputSampleStream, outputFormat, outputSampleStream.available() / outputFormat.getFrameSize());
    AudioSystem.write(outputAIS, AudioFileFormat.Type.WAVE, outputAudio);
}
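A caller sketch for ExportSample (the file name is hypothetical): build a short[] of samples and hand it over together with a target path.

public class ExportDemo {
    public static void main(String[] args) throws Exception {
        // One second of a 440 Hz tone matching ExportSample's 44.1 kHz mono format.
        short[] tone = new short[44100];
        for (int i = 0; i < tone.length; i++) {
            tone[i] = (short) (0.5 * Short.MAX_VALUE * Math.sin(2 * Math.PI * 440 * i / 44100.0));
        }
        AudioP.ExportSample("tone.wav", tone);
    }
}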
Example 54
Project: OpenNotification-master  File: FreeTTS.java View source code
public byte[] convert(byte[] data, AudioFormat oldformat, AudioFormat newformat) throws IOException {
    BrokerFactory.getLoggingBroker().logDebug("Converting to " + newformat);
    AudioInputStream ain = AudioSystem.getAudioInputStream(newformat, new AudioInputStream(new ByteArrayInputStream(data), oldformat, data.length));
    ByteArrayOutputStream newOut = new ByteArrayOutputStream();
    AudioSystem.write(ain, AudioFileFormat.Type.WAVE, newOut);
    return newOut.toByteArray();
}
Example 55
Project: speech-master  File: MikeCapture.java View source code
/**
 * startrecording must be called to start the recording. This is called
 * from Mike.nextsample
 */
public void startrecording() {
    try {
        audioformat = parent.audioformat;
        channel.open(audioformat);
        channel.start();
        bufferSize = (int) (audioformat.getSampleRate() * audioformat.getFrameSize());
        buffer = new byte[bufferSize];
        // parent.showtime ("Before asking to record");
        System.out.println("Speak or hit control-C");
        last = new double[silences];
        for (int i = 0; i < silences; i++) last[i] = huge;
        listenToWrite();
    // AudioInputStream incoming = new AudioInputStream (channel);
    // AudioSystem.write (incoming, AudioFileFormat.Type.WAVE, stream);
    } catch (Exception e) {
        e.printStackTrace();
        Log.severe("Error recording: " + e.toString());
    }
}
Example 56
Project: TuxGuitar-master  File: MidiToAudioSettingsDialog.java View source code
public List<MidiToAudioFormat> getAvailableFormats() {
    List<MidiToAudioFormat> list = new ArrayList<MidiToAudioFormat>();
    AudioFormat srcFormat = MidiToAudioSettings.DEFAULT_FORMAT;
    AudioFormat.Encoding[] encodings = AudioSystem.getTargetEncodings(srcFormat);
    for (int i = 0; i < encodings.length; i++) {
        AudioFormat dstFormat = new AudioFormat(encodings[i], srcFormat.getSampleRate(), srcFormat.getSampleSizeInBits(), srcFormat.getChannels(), srcFormat.getFrameSize(), srcFormat.getFrameRate(), srcFormat.isBigEndian());
        AudioInputStream dstStream = new AudioInputStream(null, dstFormat, 0);
        AudioFileFormat.Type[] dstTypes = AudioSystem.getAudioFileTypes(dstStream);
        if (dstTypes.length > 0) {
            list.add(new MidiToAudioFormat(dstFormat, dstTypes));
        }
    }
    return list;
}
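The central call here is AudioSystem.getTargetEncodings. A stripped-down sketch of the same probing idea, without the project-specific wrapper types:

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;

public class ListEncodings {
    public static void main(String[] args) {
        // Ask the audio system which encodings a CD-quality PCM stream can be converted to.
        AudioFormat src = new AudioFormat(44100f, 16, 2, true, false);
        for (AudioFormat.Encoding encoding : AudioSystem.getTargetEncodings(src)) {
            System.out.println("Supported target encoding: " + encoding);
        }
    }
}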
Example 57
Project: Zong-master  File: MidiToWaveRenderer.java View source code
/**
	 * Render sequence using selected or default soundbank into a wave audio file.
	 * If activeTracks is not null, only the tracks with the given indices are rendered.
	 */
public static void render(Soundbank soundbank, Sequence sequence, Set<Integer> activeTracks, OutputStream wavOutputStream) throws IOException, MidiUnavailableException {
    // Find available AudioSynthesizer.
    AudioSynthesizer synth = findAudioSynthesizer();
    if (synth == null) {
        System.out.println("No AudioSynhtesizer was found!");
        System.exit(1);
    }
    // Open AudioStream from AudioSynthesizer.
    AudioInputStream stream = synth.openStream(null, null);
    // Load user-selected Soundbank into AudioSynthesizer.
    if (soundbank != null) {
        Soundbank defsbk = synth.getDefaultSoundbank();
        if (defsbk != null)
            synth.unloadAllInstruments(defsbk);
        synth.loadAllInstruments(soundbank);
    }
    // Play Sequence into AudioSynthesizer Receiver.
    double total = send(sequence, activeTracks, synth.getReceiver());
    // Calculate how long the WAVE file needs to be.
    long len = (long) (stream.getFormat().getFrameRate() * (total + 4));
    stream = new AudioInputStream(stream, stream.getFormat(), len);
    // Write WAVE file to disk.
    AudioSystem.write(stream, AudioFileFormat.Type.WAVE, wavOutputStream);
    // We are finished, close synthesizer.
    synth.close();
}
Example 58
Project: algo-improv-orch-master  File: Sample.java View source code
/**
	 * Write to a file.
	 * 
	 * @param fn
	 *            the file name
	 * 
	 * @throws IOException
	 */
public void write(String fn) throws IOException {
    byte[] bytes = new byte[nFrames * audioFormat.getFrameSize()];
    AudioUtils.floatToByte(bytes, interleave(buf), audioFormat.isBigEndian());
    ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
    AudioInputStream aos = new AudioInputStream(bais, audioFormat, nFrames);
    AudioSystem.write(aos, AudioFileFormat.Type.AIFF, new File(fn));
}
Example 59
Project: elphelvision_eclipse-master  File: AudioRecorder.java View source code
public void SetAudioOptions(int MixerIndex, int FormatIndex) {
    FormatID = FormatIndex;
    MixerID = MixerIndex;
    Info[] mixerinfo = AudioSystem.getMixerInfo();
    // select the mixer to record from
    Mixer mixer = AudioSystem.getMixer(mixerinfo[MixerIndex]);
    // get all available audio formats on that device
    AudioFormat[] supportedFormats = GetMixerCapabilities(MixerIndex);
    // we use WAV by default
    RecAudioFileFormat = AudioFileFormat.Type.WAVE;
    // 48KHz is hardcoded for now until we create a custom field in the settings for it
    if (supportedFormats[FormatIndex].getSampleRate() == -1) {
        RecAudioFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, 48000.0F, supportedFormats[FormatIndex].getSampleSizeInBits(), supportedFormats[FormatIndex].getChannels(), supportedFormats[FormatIndex].getFrameSize(), 48000.0F, supportedFormats[FormatIndex].isBigEndian());
    } else {
        RecAudioFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, supportedFormats[FormatIndex].getSampleRate(), supportedFormats[FormatIndex].getSampleSizeInBits(), supportedFormats[FormatIndex].getChannels(), supportedFormats[FormatIndex].getFrameSize(), supportedFormats[FormatIndex].getSampleRate(), supportedFormats[FormatIndex].isBigEndian());
    }
    TargetDataLine targetDataLine = null;
    try {
        targetDataLine = AudioSystem.getTargetDataLine(RecAudioFormat, mixerinfo[MixerIndex]);
        targetDataLine.open(RecAudioFormat);
    } catch (LineUnavailableException e) {
        Parent.WriteErrortoConsole("unable to get a recording line");
    }
    Dataline = targetDataLine;
    AudioInputStream = new AudioInputStream(Dataline);
    AudioMonitorStream = new AudioInputStream(Dataline);
}
Example 60
Project: interval-music-compositor-master  File: AudioFile.java View source code
public void createCache() throws UnsupportedAudioFileException, IOException {
    setStatus(AudioFileStatus.IN_PROGRESS);
    File temporaryFile = null;
    try {
        temporaryFile = File.createTempFile(getName(), bundle.getString("imc.temporaryFile.suffix"));
    } catch (IOException e) {
        addDebugMessage(e.getMessage());
    }
    if (temporaryFile != null) {
        cache = temporaryFile;
        AudioInputStream ais = null;
        try {
            ais = decodeSourceFile();
            new WaveAudioFileWriter().write(ais, AudioFileFormat.Type.WAVE, cache);
        } catch (UnsupportedAudioFileException e) {
            setStatus(AudioFileStatus.ERROR);
            errorMessage = "Audio format not supported!";
            cache.delete();
            cache = null;
            throw e;
        } catch (IOException e) {
            setStatus(AudioFileStatus.ERROR);
            errorMessage = "Read / write error!";
            cache.delete();
            cache = null;
            throw e;
        } finally {
            if (ais != null) {
                ais.close();
            }
        }
    } else {
        setStatus(AudioFileStatus.ERROR);
        throw new IOException("Was not able to create temporary cache file.");
    }
    calculateVolumeRatio();
    calculateDuration();
    readBpm();
    Long startCutOff = Long.parseLong(bundle.getString("imc.audio.cutoff.start"));
    Long endCutOff = Long.parseLong(bundle.getString("imc.audio.cutoff.end"));
    // Now check if the track is long enough
    if (duration < startCutOff + endCutOff) {
        setStatus(AudioFileStatus.ERROR);
        errorMessage = "Track too short! (Duration: " + getFormattedTime(duration) + " s)";
    } else {
        setStatus(AudioFileStatus.OK);
    }
}
Example 61
Project: nenya-master  File: OpenALSoundPlayer.java View source code
public Clip loadClip(String path) throws IOException {
    int bundleEnd = path.lastIndexOf(":");
    InputStream sound = _loader.getSound(path.substring(0, bundleEnd), path.substring(bundleEnd + 1));
    if (path.endsWith(".ogg")) {
        try {
            AudioInputStream instream = JavaSoundPlayer.setupAudioStream(sound);
            ByteArrayOutputStream outstream = new ByteArrayOutputStream();
            byte[] buf = new byte[16 * 1024];
            int read;
            do {
                read = instream.read(buf, 0, buf.length);
                if (read >= 0) {
                    outstream.write(buf, 0, read);
                }
            } while (read >= 0);
            byte[] audio = outstream.toByteArray();
            AudioFormat format = instream.getFormat();
            long length = audio.length / format.getFrameSize();
            instream = new AudioInputStream(new ByteArrayInputStream(audio), format, length);
            outstream = new ByteArrayOutputStream();
            AudioSystem.write(instream, AudioFileFormat.Type.WAVE, outstream);
            sound = new ByteArrayInputStream(outstream.toByteArray());
        } catch (Exception e) {
            log.warning("Error decompressing audio clip", "path", path, e);
            return new Clip();
        }
    }
    return new Clip(WaveData.create(sound));
}
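On Java 9 and later, the manual read loop above can be collapsed with InputStream.readAllBytes; a sketch of the same decompression step, assuming the same JavaSoundPlayer helper:

AudioInputStream instream = JavaSoundPlayer.setupAudioStream(sound);
byte[] audio = instream.readAllBytes(); // Java 9+; replaces the read/write loop
AudioFormat format = instream.getFormat();
long length = audio.length / format.getFrameSize();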
Example 62
Project: voipcall-master  File: TestLines.java View source code
private void writeFileOut(AudioInputStream streamToWrite) {
    // ----------------------------------------------------------------------
    // Write results out to a file (specified by the static variable OUT_FILE)
    System.out.println("Attempting to write sound recording to file = " + OUT_FILE);
    File file = new File(OUT_FILE);
    if (file.exists()) {
        System.out.println("File " + OUT_FILE + " already exists.");
        return;
    }
    // AudioFileFormat.Type fileType = AudioFileFormat.Type.AU;
    // AudioFileFormat.Type fileType = AudioFileFormat.Type.AIFF;
    AudioFileFormat.Type fileType = AudioFileFormat.Type.WAVE;
    try {
        streamToWrite.reset();
    } catch (IOException e) {
        System.out.println("Write to file: Failed to reset input to start.");
        e.printStackTrace();
    }
    try {
        if (AudioSystem.write(streamToWrite, fileType, file) == -1) {
            System.out.println("Failed to write to audio file " + OUT_FILE);
        }
    } catch (IOException e) {
        System.out.println("Failed to write to audio file " + OUT_FILE);
        e.printStackTrace();
    }
    System.out.println("Wrote sound to file " + OUT_FILE);
}
Example 63
Project: acs-master  File: AlarmSound.java View source code
/**
	 * Dump info about supported audio, file types and so on...
	 * <P>
	 * This method is useful while updating the audio files.
	 */
private void dumpAudioInformation() {
    // Java supported file types
    AudioFileFormat.Type[] fileTypes = AudioSystem.getAudioFileTypes();
    if (fileTypes == null || fileTypes.length == 0) {
        System.out.println("No audio file types supported.");
    } else {
        for (AudioFileFormat.Type type : fileTypes) {
            System.out.println(type.toString() + ", extension " + type.getExtension());
        }
    }
    Mixer.Info[] mixerInfos = AudioSystem.getMixerInfo();
    System.out.println("Mixers found: " + mixerInfos.length);
    for (Mixer.Info mi : mixerInfos) {
        System.out.println("\tMixer " + mi.getName() + ": " + mi.getVendor() + ", " + mi.getDescription());
    }
    // Dump info about the alarm files
    for (URL url : soundURLs) {
        AudioFileFormat format = null;
        try {
            format = AudioSystem.getAudioFileFormat(url);
        } catch (IOException ioe) {
            System.err.println("Error " + ioe.getMessage() + " accessing URL " + url.toString());
            continue;
        } catch (UnsupportedAudioFileException ue) {
            System.err.println("Unsupported audio format for " + url + " (" + ue.getMessage() + ")");
            // format is still null here; skip this URL to avoid a NullPointerException below.
            continue;
        }
        System.out.println("Properties of " + url);
        System.out.println("\tAudio file type " + format.getType().toString());
        System.out.println("\tIs file type supported: " + AudioSystem.isFileTypeSupported(format.getType()));
        System.out.println("\tLength in byes " + format.getByteLength());
        Map<String, Object> props = format.properties();
        Set<String> keys = props.keySet();
        for (String str : keys) {
            System.out.println("\t[" + str + ", " + props.get(str).toString() + "]");
        }
        AudioFormat aFormat = format.getFormat();
        System.out.println("\tEncoding " + aFormat.getEncoding().toString());
        System.out.print("\tByte order ");
        if (aFormat.isBigEndian()) {
            System.out.println("big endian");
        } else {
            System.out.println("little endian");
        }
        System.out.println("\tSample rate: " + aFormat.getSampleRate());
        System.out.println("\tNum. of bits of a sample: " + aFormat.getSampleSizeInBits());
        System.out.println("\tNum. of channels: " + aFormat.getChannels());
    }
}
Example 64
Project: algs4-master  File: StdAudio.java View source code
/**
     * Saves the double array as an audio file (using .wav or .au format).
     *
     * @param  filename the name of the audio file
     * @param  samples the array of samples
     * @throws IllegalArgumentException if unable to save {@code filename}
     * @throws IllegalArgumentException if {@code samples} is {@code null}
     */
public static void save(String filename, double[] samples) {
    if (samples == null) {
        throw new IllegalArgumentException("samples[] is null");
    }
    // assumes 44,100 samples per second
    // use 16-bit audio, mono, signed PCM, little Endian
    AudioFormat format = new AudioFormat(SAMPLE_RATE, 16, 1, true, false);
    byte[] data = new byte[2 * samples.length];
    for (int i = 0; i < samples.length; i++) {
        int temp = (short) (samples[i] * MAX_16_BIT);
        data[2 * i + 0] = (byte) temp;
        data[2 * i + 1] = (byte) (temp >> 8);
    }
    // now save the file
    try {
        ByteArrayInputStream bais = new ByteArrayInputStream(data);
        AudioInputStream ais = new AudioInputStream(bais, format, samples.length);
        if (filename.endsWith(".wav") || filename.endsWith(".WAV")) {
            AudioSystem.write(ais, AudioFileFormat.Type.WAVE, new File(filename));
        } else if (filename.endsWith(".au") || filename.endsWith(".AU")) {
            AudioSystem.write(ais, AudioFileFormat.Type.AU, new File(filename));
        } else {
            throw new IllegalArgumentException("unsupported audio format: '" + filename + "'");
        }
    } catch (IOException ioe) {
        throw new IllegalArgumentException("unable to save file '" + filename + "'", ioe);
    }
}
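A caller sketch for this save method (the file name is hypothetical; save assumes 44,100 samples per second):

public class SaveDemo {
    public static void main(String[] args) {
        int sampleRate = 44100;
        double[] samples = new double[sampleRate]; // one second of audio
        for (int i = 0; i < samples.length; i++) {
            samples[i] = 0.5 * Math.sin(2 * Math.PI * 440 * i / sampleRate);
        }
        StdAudio.save("a440.wav", samples);
    }
}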
Example 65
Project: hudson_plugins-master  File: HudsonSoundsNotifier.java View source code
protected static TreeMap<String, SoundBite> rebuildSoundsIndex(String urlString) {
    final TreeMap<String, SoundBite> index = new TreeMap<String, SoundBite>();
    try {
        URL url = new URL(urlString);
        URLConnection connection = url.openConnection();
        ZipInputStream zipInputStream = new ZipInputStream(connection.getInputStream());
        try {
            ZipEntry entry;
            while ((entry = zipInputStream.getNextEntry()) != null) {
                if (!entry.isDirectory()) {
                    final String id = getBiteName(entry.getName());
                    AudioFileFormat f = null;
                    try {
                        f = AudioSystem.getAudioFileFormat(new BufferedInputStream(zipInputStream));
                    } catch (UnsupportedAudioFileException e) {
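                        // Not a supported audio file; index the entry anyway with a null format.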
                    }
                    index.put(id, new SoundBite(id, entry.getName(), urlString, f));
                }
            }
        } finally {
            IOUtils.closeQuietly(zipInputStream);
        }
    } catch (Exception e) {
    }
    return index;
}
Example 66
Project: LanguageBuddy-master  File: Recorder.java View source code
public static AudioFileFormat.Type fileFormatFromExtension(File file) {
    AudioFileFormat.Type fileFormat = null;
    if (file.getName().toLowerCase().endsWith(".mp3")) {
        fileFormat = AudioFileTypes.getType("MP3", "mp3");
    } else if (file.getName().toLowerCase().endsWith(".wav")) {
        fileFormat = AudioFileFormat.Type.WAVE;
    } else if (file.getName().toLowerCase().endsWith(".au")) {
        fileFormat = AudioFileFormat.Type.AU;
    }
    return fileFormat;
}
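A quick mapping check for this helper (file names are hypothetical; the MP3 branch additionally requires the AudioFileTypes helper, presumably from the tritonus/javazoom SPI libraries, on the classpath):

import java.io.File;
import javax.sound.sampled.AudioFileFormat;

public class ExtensionDemo {
    public static void main(String[] args) {
        for (String name : new String[] { "take1.wav", "take2.mp3", "take3.au" }) {
            AudioFileFormat.Type type = Recorder.fileFormatFromExtension(new File(name));
            System.out.println(name + " -> " + type);
        }
    }
}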
Example 67
Project: Muse-Controller-master  File: JavaPandoraPlayer.java View source code
public void run() {
    try {
        byte[] buf = new byte[8192];
        while (true) {
            int length = inputStream.read(buf);
            if (length < 0) {
                break;
            }
            totalBytes += length;
            bigBuffer.write(buf, 0, length);
            bigBuffer.flush();
            if (totalBytes > 64000) {
                synchronized (monitor) {
                    monitor.notify();
                }
            }
        }
    } catch (IOException e) {
        logger.log(Level.WARNING, "Exception caught.", e);
    } finally {
        if (inputStream != null) {
            try {
                inputStream.close();
            } catch (IOException e) {
                logger.log(Level.WARNING, "Exception caught.", e);
            }
        }
        try {
            AudioFileFormat format = AudioSystem.getAudioFileFormat(tempFile);
            totalTime = getTimeLengthEstimation(format.properties());
        } catch (Exception e) {
            logger.log(Level.INFO, "skipping audio file properties due to error.", e);
        }
    }
}
Example 68
Project: riff-wav-for-java-master  File: WavPackageImpl.java View source code
/**
	 * Complete the initialization of the package and its meta-model.  This
	 * method is guarded to have no effect on any invocation but its first.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
public void initializePackageContents() {
    if (isInitialized)
        return;
    isInitialized = true;
    // Initialize package
    setName(eNAME);
    setNsPrefix(eNS_PREFIX);
    setNsURI(eNS_URI);
    // Create type parameters
    // Set bounds for type parameters
    // Add supertypes to classes
    chunkCueEClass.getESuperTypes().add(this.getChunk());
    chunkDataEClass.getESuperTypes().add(this.getChunk());
    chunkDataListEClass.getESuperTypes().add(this.getChunk());
    chunkDataListTypeEClass.getESuperTypes().add(this.getChunk());
    chunkDataListTypeLabelEClass.getESuperTypes().add(this.getChunkDataListType());
    chunkDataListTypeLabeledTextEClass.getESuperTypes().add(this.getChunkDataListType());
    chunkDataListTypeNoteEClass.getESuperTypes().add(this.getChunkDataListType());
    chunkFactEClass.getESuperTypes().add(this.getChunk());
    chunkFormatEClass.getESuperTypes().add(this.getChunk());
    chunkInstrumentEClass.getESuperTypes().add(this.getChunk());
    chunkPlayListEClass.getESuperTypes().add(this.getChunk());
    chunkSamplerEClass.getESuperTypes().add(this.getChunk());
    chunkSilentEClass.getESuperTypes().add(this.getChunk());
    chunkUnknownEClass.getESuperTypes().add(this.getChunk());
    chunkWaveListEClass.getESuperTypes().add(this.getChunk());
    sampleData8BitEClass.getESuperTypes().add(this.getSampleData());
    sampleData16BitEClass.getESuperTypes().add(this.getSampleData());
    // Initialize classes and features; add operations and parameters
    initEClass(riffWaveEClass, RIFFWave.class, "RIFFWave", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEReference(getRIFFWave_Chunks(), this.getChunk(), null, "chunks", null, 0, -1, RIFFWave.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, IS_COMPOSITE, !IS_RESOLVE_PROXIES, !IS_UNSETTABLE, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEReference(getRIFFWave_ParseChunkExceptions(), this.getParseChunkException(), null, "parseChunkExceptions", null, 0, -1, RIFFWave.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, IS_COMPOSITE, !IS_RESOLVE_PROXIES, !IS_UNSETTABLE, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getRIFFWave_Size(), ecorePackage.getELong(), "size", null, 0, 1, RIFFWave.class, IS_TRANSIENT, IS_VOLATILE, !IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, IS_DERIVED, IS_ORDERED);
    EOperation op = addEOperation(riffWaveEClass, null, "getChunksByEClass", 0, 1, IS_UNIQUE, IS_ORDERED);
    addEParameter(op, ecorePackage.getEClass(), "eClass", 0, 1, IS_UNIQUE, IS_ORDERED);
    EGenericType g1 = createEGenericType(ecorePackage.getEEList());
    EGenericType g2 = createEGenericType(this.getChunk());
    g1.getETypeArguments().add(g2);
    initEOperation(op, g1);
    op = addEOperation(riffWaveEClass, this.getChunk(), "getFirstChunkByEClass", 0, 1, IS_UNIQUE, IS_ORDERED);
    addEParameter(op, ecorePackage.getEClass(), "eClass", 0, 1, IS_UNIQUE, IS_ORDERED);
    op = addEOperation(riffWaveEClass, null, "init", 0, 1, IS_UNIQUE, IS_ORDERED);
    addEParameter(op, this.getExtendedByteBuffer(), "buf", 0, 1, IS_UNIQUE, IS_ORDERED);
    addEException(op, this.getRiffWaveException());
    op = addEOperation(riffWaveEClass, this.getAudioFileFormat(), "toAudioFileFormat", 0, 1, IS_UNIQUE, IS_ORDERED);
    addEException(op, this.getUnsupportedAudioFileException());
    op = addEOperation(riffWaveEClass, this.getAudioFormat(), "toAudioFormat", 0, 1, IS_UNIQUE, IS_ORDERED);
    addEException(op, this.getUnsupportedAudioFileException());
    op = addEOperation(riffWaveEClass, this.getAudioInputStream(), "toAudioInputStream", 0, 1, IS_UNIQUE, IS_ORDERED);
    addEException(op, this.getUnsupportedAudioFileException());
    op = addEOperation(riffWaveEClass, ecorePackage.getEByteArray(), "toByteArray", 0, 1, IS_UNIQUE, IS_ORDERED);
    addEException(op, this.getRiffWaveException());
    op = addEOperation(riffWaveEClass, null, "write", 0, 1, IS_UNIQUE, IS_ORDERED);
    addEParameter(op, this.getFile(), "file", 0, 1, IS_UNIQUE, IS_ORDERED);
    addEException(op, this.getIOException());
    addEException(op, this.getRiffWaveException());
    initEClass(channelEClass, Channel.class, "Channel", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEReference(getChannel_SampleData(), this.getSampleData(), null, "sampleData", null, 0, 1, Channel.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, IS_COMPOSITE, !IS_RESOLVE_PROXIES, !IS_UNSETTABLE, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEClass(chunkEClass, Chunk.class, "Chunk", IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getChunk_BlockAlignedSize(), ecorePackage.getELong(), "blockAlignedSize", null, 0, 1, Chunk.class, IS_TRANSIENT, IS_VOLATILE, !IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunk_Size(), ecorePackage.getELong(), "size", null, 0, 1, Chunk.class, IS_TRANSIENT, IS_VOLATILE, !IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunk_ChunkTypeID(), this.getChunkTypeID(), "chunkTypeID", null, 0, 1, Chunk.class, IS_TRANSIENT, IS_VOLATILE, !IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunk_ChunkTypeIDValue(), ecorePackage.getEInt(), "chunkTypeIDValue", null, 0, 1, Chunk.class, IS_TRANSIENT, IS_VOLATILE, !IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, IS_DERIVED, IS_ORDERED);
    op = addEOperation(chunkEClass, null, "init", 0, 1, IS_UNIQUE, IS_ORDERED);
    addEParameter(op, this.getRIFFWave(), "riffWave", 0, 1, IS_UNIQUE, IS_ORDERED);
    addEParameter(op, this.getExtendedByteBuffer(), "buf", 0, 1, IS_UNIQUE, IS_ORDERED);
    addEException(op, this.getRiffWaveException());
    op = addEOperation(chunkEClass, ecorePackage.getEByteArray(), "toByteArray", 0, 1, IS_UNIQUE, IS_ORDERED);
    addEException(op, this.getRiffWaveException());
    initEClass(chunkCueEClass, ChunkCue.class, "ChunkCue", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getChunkCue_NumberOfCuePoints(), this.getUnsignedInt(), "numberOfCuePoints", null, 0, 1, ChunkCue.class, IS_TRANSIENT, IS_VOLATILE, !IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, IS_DERIVED, IS_ORDERED);
    initEReference(getChunkCue_CuePoints(), this.getCuePoint(), null, "cuePoints", null, 0, -1, ChunkCue.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_COMPOSITE, IS_RESOLVE_PROXIES, !IS_UNSETTABLE, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEClass(chunkDataEClass, ChunkData.class, "ChunkData", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEReference(getChunkData_Channels(), this.getChannel(), null, "channels", null, 0, -1, ChunkData.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, IS_COMPOSITE, !IS_RESOLVE_PROXIES, !IS_UNSETTABLE, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkData_SampleDataOriginal(), ecorePackage.getEByteArray(), "sampleDataOriginal", null, 0, 1, ChunkData.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEClass(chunkDataListEClass, ChunkDataList.class, "ChunkDataList", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getChunkDataList_TypeID(), this.getChunkDataListTypeID(), "typeID", null, 0, 1, ChunkDataList.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEReference(getChunkDataList_DataListChunks(), this.getChunkDataListType(), null, "dataListChunks", null, 0, -1, ChunkDataList.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_COMPOSITE, IS_RESOLVE_PROXIES, !IS_UNSETTABLE, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEClass(chunkDataListTypeEClass, ChunkDataListType.class, "ChunkDataListType", IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getChunkDataListType_CuePointID(), this.getUnsignedInt(), "cuePointID", null, 0, 1, ChunkDataListType.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkDataListType_Text(), ecorePackage.getEByteArray(), "text", null, 0, 1, ChunkDataListType.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkDataListType_TextAsString(), ecorePackage.getEString(), "textAsString", null, 0, 1, ChunkDataListType.class, IS_TRANSIENT, IS_VOLATILE, !IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, IS_DERIVED, IS_ORDERED);
    initEClass(chunkDataListTypeLabelEClass, ChunkDataListTypeLabel.class, "ChunkDataListTypeLabel", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEClass(chunkDataListTypeLabeledTextEClass, ChunkDataListTypeLabeledText.class, "ChunkDataListTypeLabeledText", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getChunkDataListTypeLabeledText_SampleLength(), this.getUnsignedInt(), "sampleLength", null, 0, 1, ChunkDataListTypeLabeledText.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkDataListTypeLabeledText_PurposeID(), this.getUnsignedInt(), "purposeID", null, 0, 1, ChunkDataListTypeLabeledText.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkDataListTypeLabeledText_Country(), this.getUnsignedShort(), "country", null, 0, 1, ChunkDataListTypeLabeledText.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkDataListTypeLabeledText_Language(), this.getUnsignedShort(), "language", null, 0, 1, ChunkDataListTypeLabeledText.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkDataListTypeLabeledText_Dialect(), this.getUnsignedShort(), "dialect", null, 0, 1, ChunkDataListTypeLabeledText.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkDataListTypeLabeledText_CodePage(), this.getUnsignedShort(), "codePage", null, 0, 1, ChunkDataListTypeLabeledText.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEClass(chunkDataListTypeNoteEClass, ChunkDataListTypeNote.class, "ChunkDataListTypeNote", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEClass(chunkFactEClass, ChunkFact.class, "ChunkFact", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getChunkFact_FormatDependantData(), ecorePackage.getEByteArray(), "formatDependantData", null, 0, 1, ChunkFact.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEClass(chunkFormatEClass, ChunkFormat.class, "ChunkFormat", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getChunkFormat_CompressionCode(), this.getCompressionCode(), "compressionCode", null, 0, 1, ChunkFormat.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkFormat_CompressionCodeValue(), this.getUnsignedShort(), "compressionCodeValue", null, 0, 1, ChunkFormat.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkFormat_NumberOfChannels(), this.getUnsignedShort(), "numberOfChannels", null, 0, 1, ChunkFormat.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkFormat_SampleRate(), this.getUnsignedInt(), "sampleRate", null, 0, 1, ChunkFormat.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkFormat_AverageBytesPerSecond(), this.getUnsignedInt(), "averageBytesPerSecond", null, 0, 1, ChunkFormat.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkFormat_BlockAlign(), this.getUnsignedShort(), "blockAlign", null, 0, 1, ChunkFormat.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkFormat_SignificantBitsPerSample(), this.getUnsignedShort(), "significantBitsPerSample", null, 0, 1, ChunkFormat.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkFormat_NumberOfExtraFormatBytes(), this.getUnsignedShort(), "numberOfExtraFormatBytes", null, 0, 1, ChunkFormat.class, IS_TRANSIENT, IS_VOLATILE, !IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkFormat_ExtraFormatBytes(), ecorePackage.getEByteArray(), "extraFormatBytes", null, 0, 1, ChunkFormat.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEClass(chunkInstrumentEClass, ChunkInstrument.class, "ChunkInstrument", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getChunkInstrument_UnshiftedNote(), ecorePackage.getEByte(), "unshiftedNote", null, 0, 1, ChunkInstrument.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkInstrument_FineTune(), ecorePackage.getEByte(), "fineTune", null, 0, 1, ChunkInstrument.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkInstrument_Gain(), ecorePackage.getEByte(), "gain", null, 0, 1, ChunkInstrument.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkInstrument_LowNote(), ecorePackage.getEByte(), "lowNote", null, 0, 1, ChunkInstrument.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkInstrument_HighNote(), ecorePackage.getEByte(), "highNote", null, 0, 1, ChunkInstrument.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkInstrument_LowVelocity(), ecorePackage.getEByte(), "lowVelocity", null, 0, 1, ChunkInstrument.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkInstrument_HighVelocity(), ecorePackage.getEByte(), "highVelocity", null, 0, 1, ChunkInstrument.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEClass(chunkPlayListEClass, ChunkPlayList.class, "ChunkPlayList", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getChunkPlayList_NumberOfSegments(), this.getUnsignedInt(), "numberOfSegments", null, 0, 1, ChunkPlayList.class, IS_TRANSIENT, IS_VOLATILE, !IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, IS_DERIVED, IS_ORDERED);
    initEReference(getChunkPlayList_Segments(), this.getSegment(), null, "segments", null, 0, -1, ChunkPlayList.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_COMPOSITE, IS_RESOLVE_PROXIES, !IS_UNSETTABLE, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEClass(chunkSamplerEClass, ChunkSampler.class, "ChunkSampler", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getChunkSampler_Manufacturer(), this.getUnsignedInt(), "manufacturer", null, 0, 1, ChunkSampler.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkSampler_Product(), this.getUnsignedInt(), "product", null, 0, 1, ChunkSampler.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkSampler_SamplePeriod(), this.getUnsignedInt(), "samplePeriod", null, 0, 1, ChunkSampler.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkSampler_MidiUnityNote(), this.getUnsignedInt(), "midiUnityNote", null, 0, 1, ChunkSampler.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkSampler_MidiPitchFraction(), this.getUnsignedInt(), "midiPitchFraction", null, 0, 1, ChunkSampler.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkSampler_SmpteFormat(), this.getUnsignedInt(), "smpteFormat", null, 0, 1, ChunkSampler.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkSampler_SmpteOffset(), this.getUnsignedInt(), "smpteOffset", null, 0, 1, ChunkSampler.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkSampler_NumberOfSampleLoops(), this.getUnsignedInt(), "numberOfSampleLoops", null, 0, 1, ChunkSampler.class, IS_TRANSIENT, IS_VOLATILE, !IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkSampler_SamplerDataSize(), this.getUnsignedInt(), "samplerDataSize", null, 0, 1, ChunkSampler.class, IS_TRANSIENT, IS_VOLATILE, !IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, IS_DERIVED, IS_ORDERED);
    initEReference(getChunkSampler_SampleLoops(), this.getSampleLoop(), null, "sampleLoops", null, 0, -1, ChunkSampler.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, IS_COMPOSITE, !IS_RESOLVE_PROXIES, !IS_UNSETTABLE, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkSampler_SamplerData(), ecorePackage.getEByteArray(), "samplerData", null, 0, 1, ChunkSampler.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEClass(chunkSilentEClass, ChunkSilent.class, "ChunkSilent", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getChunkSilent_NumberOfSilentSamples(), this.getUnsignedInt(), "numberOfSilentSamples", null, 0, 1, ChunkSilent.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEClass(chunkUnknownEClass, ChunkUnknown.class, "ChunkUnknown", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getChunkUnknown_Data(), ecorePackage.getEByteArray(), "data", null, 0, 1, ChunkUnknown.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkUnknown_UnknownChunkTypeIdValue(), this.getUnsignedInt(), "unknownChunkTypeIdValue", null, 0, 1, ChunkUnknown.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getChunkUnknown_WavRandomAccessFilePointer(), ecorePackage.getELong(), "wavRandomAccessFilePointer", null, 0, 1, ChunkUnknown.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEClass(chunkWaveListEClass, ChunkWaveList.class, "ChunkWaveList", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEReference(getChunkWaveList_AlternatingSilentAndDataChunks(), this.getChunk(), null, "alternatingSilentAndDataChunks", null, 0, -1, ChunkWaveList.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, IS_COMPOSITE, !IS_RESOLVE_PROXIES, !IS_UNSETTABLE, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEClass(cuePointEClass, CuePoint.class, "CuePoint", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getCuePoint_CuePointID(), this.getUnsignedInt(), "cuePointID", null, 0, 1, CuePoint.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getCuePoint_Position(), this.getUnsignedInt(), "position", null, 0, 1, CuePoint.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getCuePoint_DataChunkID(), this.getUnsignedInt(), "dataChunkID", null, 0, 1, CuePoint.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getCuePoint_ChunkStart(), this.getUnsignedInt(), "chunkStart", null, 0, 1, CuePoint.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getCuePoint_BlockStart(), this.getUnsignedInt(), "blockStart", null, 0, 1, CuePoint.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getCuePoint_SampleOffset(), this.getUnsignedInt(), "sampleOffset", null, 0, 1, CuePoint.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEClass(parseChunkExceptionEClass, ParseChunkException.class, "ParseChunkException", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getParseChunkException_Exception(), this.getException(), "exception", null, 0, 1, ParseChunkException.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getParseChunkException_StringCause(), ecorePackage.getEString(), "stringCause", null, 0, 1, ParseChunkException.class, IS_TRANSIENT, IS_VOLATILE, !IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, IS_DERIVED, IS_ORDERED);
    initEAttribute(getParseChunkException_StringMessage(), ecorePackage.getEString(), "stringMessage", null, 0, 1, ParseChunkException.class, IS_TRANSIENT, IS_VOLATILE, !IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, IS_DERIVED, IS_ORDERED);
    initEAttribute(getParseChunkException_StringStackTrace(), ecorePackage.getEString(), "stringStackTrace", null, 0, 1, ParseChunkException.class, IS_TRANSIENT, IS_VOLATILE, !IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, IS_DERIVED, IS_ORDERED);
    initEClass(sampleDataEClass, SampleData.class, "SampleData", IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEClass(sampleData8BitEClass, SampleData8Bit.class, "SampleData8Bit", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getSampleData8Bit_Sample(), ecorePackage.getEByte(), "sample", null, 0, 1, SampleData8Bit.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEClass(sampleData16BitEClass, SampleData16Bit.class, "SampleData16Bit", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getSampleData16Bit_Sample(), ecorePackage.getEShort(), "sample", null, 0, 1, SampleData16Bit.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEClass(sampleLoopEClass, SampleLoop.class, "SampleLoop", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getSampleLoop_CuePointID(), this.getUnsignedInt(), "cuePointID", null, 0, 1, SampleLoop.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getSampleLoop_Type(), this.getUnsignedInt(), "type", null, 0, 1, SampleLoop.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getSampleLoop_Start(), this.getUnsignedInt(), "start", null, 0, 1, SampleLoop.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getSampleLoop_End(), this.getUnsignedInt(), "end", null, 0, 1, SampleLoop.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getSampleLoop_Fraction(), this.getUnsignedInt(), "fraction", null, 0, 1, SampleLoop.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getSampleLoop_PlayCount(), this.getUnsignedInt(), "playCount", null, 0, 1, SampleLoop.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEClass(segmentEClass, Segment.class, "Segment", !IS_ABSTRACT, !IS_INTERFACE, IS_GENERATED_INSTANCE_CLASS);
    initEAttribute(getSegment_CuePointID(), this.getUnsignedInt(), "cuePointID", null, 0, 1, Segment.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getSegment_LengthInSamples(), this.getUnsignedInt(), "lengthInSamples", null, 0, 1, Segment.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    initEAttribute(getSegment_NumberOfRepeats(), this.getUnsignedInt(), "numberOfRepeats", null, 0, 1, Segment.class, !IS_TRANSIENT, !IS_VOLATILE, IS_CHANGEABLE, !IS_UNSETTABLE, !IS_ID, IS_UNIQUE, !IS_DERIVED, IS_ORDERED);
    // Initialize enums and add enum literals
    initEEnum(chunkDataListTypeIDEEnum, ChunkDataListTypeID.class, "ChunkDataListTypeID");
    addEEnumLiteral(chunkDataListTypeIDEEnum, ChunkDataListTypeID.UNKNOWN);
    addEEnumLiteral(chunkDataListTypeIDEEnum, ChunkDataListTypeID.ADTL);
    initEEnum(chunkTypeIDEEnum, ChunkTypeID.class, "ChunkTypeID");
    addEEnumLiteral(chunkTypeIDEEnum, ChunkTypeID.UNKNOWN);
    addEEnumLiteral(chunkTypeIDEEnum, ChunkTypeID.RIFF);
    addEEnumLiteral(chunkTypeIDEEnum, ChunkTypeID.WAVE);
    addEEnumLiteral(chunkTypeIDEEnum, ChunkTypeID.CUE_);
    addEEnumLiteral(chunkTypeIDEEnum, ChunkTypeID.DATA);
    addEEnumLiteral(chunkTypeIDEEnum, ChunkTypeID.FACT);
    addEEnumLiteral(chunkTypeIDEEnum, ChunkTypeID.FMT_);
    addEEnumLiteral(chunkTypeIDEEnum, ChunkTypeID.INST);
    addEEnumLiteral(chunkTypeIDEEnum, ChunkTypeID.LABL);
    addEEnumLiteral(chunkTypeIDEEnum, ChunkTypeID.LIST);
    addEEnumLiteral(chunkTypeIDEEnum, ChunkTypeID.LTXT);
    addEEnumLiteral(chunkTypeIDEEnum, ChunkTypeID.NOTE);
    addEEnumLiteral(chunkTypeIDEEnum, ChunkTypeID.PLST);
    addEEnumLiteral(chunkTypeIDEEnum, ChunkTypeID.SINT);
    addEEnumLiteral(chunkTypeIDEEnum, ChunkTypeID.SMPL);
    addEEnumLiteral(chunkTypeIDEEnum, ChunkTypeID.WAVL);
    initEEnum(compressionCodeEEnum, CompressionCode.class, "CompressionCode");
    addEEnumLiteral(compressionCodeEEnum, CompressionCode.COMPRESSION_CODE_0);
    addEEnumLiteral(compressionCodeEEnum, CompressionCode.COMPRESSION_CODE_1);
    addEEnumLiteral(compressionCodeEEnum, CompressionCode.COMPRESSION_CODE_2);
    addEEnumLiteral(compressionCodeEEnum, CompressionCode.COMPRESSION_CODE_6);
    addEEnumLiteral(compressionCodeEEnum, CompressionCode.COMPRESSION_CODE_7);
    addEEnumLiteral(compressionCodeEEnum, CompressionCode.COMPRESSION_CODE_17);
    addEEnumLiteral(compressionCodeEEnum, CompressionCode.COMPRESSION_CODE_20);
    addEEnumLiteral(compressionCodeEEnum, CompressionCode.COMPRESSION_CODE_49);
    addEEnumLiteral(compressionCodeEEnum, CompressionCode.COMPRESSION_CODE_64);
    addEEnumLiteral(compressionCodeEEnum, CompressionCode.COMPRESSION_CODE_80);
    addEEnumLiteral(compressionCodeEEnum, CompressionCode.COMPRESSION_CODE_65536);
    initEEnum(sampleLoopTypeEEnum, SampleLoopType.class, "SampleLoopType");
    addEEnumLiteral(sampleLoopTypeEEnum, SampleLoopType.UNKNOWN);
    addEEnumLiteral(sampleLoopTypeEEnum, SampleLoopType.FORWARD);
    addEEnumLiteral(sampleLoopTypeEEnum, SampleLoopType.PING_PONG);
    addEEnumLiteral(sampleLoopTypeEEnum, SampleLoopType.BACKWARD);
    initEEnum(smpteFormatEEnum, SMPTEFormat.class, "SMPTEFormat");
    addEEnumLiteral(smpteFormatEEnum, SMPTEFormat.SMPTE_0);
    addEEnumLiteral(smpteFormatEEnum, SMPTEFormat.SMPTE_24);
    addEEnumLiteral(smpteFormatEEnum, SMPTEFormat.SMPTE_25);
    addEEnumLiteral(smpteFormatEEnum, SMPTEFormat.SMPTE_29);
    addEEnumLiteral(smpteFormatEEnum, SMPTEFormat.SMPTE_30);
    // Initialize data types
    initEDataType(audioFileFormatEDataType, AudioFileFormat.class, "AudioFileFormat", IS_SERIALIZABLE, !IS_GENERATED_INSTANCE_CLASS);
    initEDataType(audioFormatEDataType, AudioFormat.class, "AudioFormat", IS_SERIALIZABLE, !IS_GENERATED_INSTANCE_CLASS);
    initEDataType(audioInputStreamEDataType, AudioInputStream.class, "AudioInputStream", IS_SERIALIZABLE, !IS_GENERATED_INSTANCE_CLASS);
    initEDataType(exceptionEDataType, Exception.class, "Exception", IS_SERIALIZABLE, !IS_GENERATED_INSTANCE_CLASS);
    initEDataType(extendedByteBufferEDataType, ExtendedByteBuffer.class, "ExtendedByteBuffer", IS_SERIALIZABLE, !IS_GENERATED_INSTANCE_CLASS);
    initEDataType(fileEDataType, File.class, "File", IS_SERIALIZABLE, !IS_GENERATED_INSTANCE_CLASS);
    initEDataType(ioExceptionEDataType, IOException.class, "IOException", IS_SERIALIZABLE, !IS_GENERATED_INSTANCE_CLASS);
    initEDataType(riffWaveExceptionEDataType, RiffWaveException.class, "RiffWaveException", IS_SERIALIZABLE, !IS_GENERATED_INSTANCE_CLASS);
    initEDataType(unsignedShortEDataType, Integer.class, "UnsignedShort", IS_SERIALIZABLE, !IS_GENERATED_INSTANCE_CLASS);
    initEDataType(unsignedIntEDataType, Long.class, "UnsignedInt", IS_SERIALIZABLE, !IS_GENERATED_INSTANCE_CLASS);
    initEDataType(unsupportedAudioFileExceptionEDataType, UnsupportedAudioFileException.class, "UnsupportedAudioFileException", IS_SERIALIZABLE, !IS_GENERATED_INSTANCE_CLASS);
    // Create resource
    createResource(eNS_URI);
}
Example 69
Project: StegDroid-master  File: WaveFileWriter.java View source code
// METHODS TO IMPLEMENT AudioFileWriter
@Override
public AudioFileFormat.Type[] getAudioFileTypes(AudioInputStream stream) {
    AudioFileFormat.Type[] filetypes = new AudioFileFormat.Type[types.length];
    System.arraycopy(types, 0, filetypes, 0, types.length);
    // make sure we can write this stream
    AudioFormat format = stream.getFormat();
    AudioFormat.Encoding encoding = format.getEncoding();
    if (AudioFormat.Encoding.ALAW.equals(encoding) || AudioFormat.Encoding.ULAW.equals(encoding) || AudioFormat.Encoding.PCM_SIGNED.equals(encoding) || AudioFormat.Encoding.PCM_UNSIGNED.equals(encoding)) {
        return filetypes;
    }
    return new AudioFileFormat.Type[0];
}
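Note: application code rarely calls getAudioFileTypes on a single writer; the AudioSystem facade aggregates every installed provider. A minimal sketch of querying the writable types for a stream (the input file name is a placeholder):

import java.io.File;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

public class ListWritableTypes {
    public static void main(String[] args) throws Exception {
        // Any readable audio file will do; "input.wav" is hypothetical.
        AudioInputStream stream = AudioSystem.getAudioInputStream(new File("input.wav"));
        // Asks every installed writer which file types it can produce for this stream.
        for (AudioFileFormat.Type type : AudioSystem.getAudioFileTypes(stream)) {
            System.out.println(type + " (." + type.getExtension() + ")");
        }
    }
}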
Example 70
Project: Weasis-master  File: AuView.java View source code
private void saveAudioFile(DicomSpecialElement media) {
    AudioInputStream stream = getAudioInputStream(media);
    if (stream != null) {
        JFileChooser fileChooser = new JFileChooser();
        fileChooser.setFileSelectionMode(JFileChooser.FILES_ONLY);
        fileChooser.setAcceptAllFileFilterUsed(false);
        //$NON-NLS-1$ //$NON-NLS-2$
        FileFormatFilter filter = new FileFormatFilter("au", "AU");
        fileChooser.addChoosableFileFilter(filter);
        //$NON-NLS-1$ //$NON-NLS-2$
        fileChooser.addChoosableFileFilter(new FileFormatFilter("wav", "WAVE"));
        fileChooser.setFileFilter(filter);
        if (fileChooser.showSaveDialog(null) == JFileChooser.APPROVE_OPTION) {
            if (fileChooser.getSelectedFile() != null) {
                File file = fileChooser.getSelectedFile();
                filter = (FileFormatFilter) fileChooser.getFileFilter();
                //$NON-NLS-1$ //$NON-NLS-2$
                String extension = filter == null ? ".au" : "." + filter.getDefaultExtension();
                String filename = file.getName().endsWith(extension) ? file.getPath() : file.getPath() + extension;
                try {
                    if (".wav".equals(extension)) {
                        //$NON-NLS-1$
                        AudioSystem.write(stream, AudioFileFormat.Type.WAVE, new File(filename));
                    } else {
                        AudioSystem.write(stream, AudioFileFormat.Type.AU, new File(filename));
                    }
                } catch (IOException ex) {
                    LOGGER.error("Cannot save audio file!", ex);
                }
            }
        }
    }
}
Example 71
Project: hiena-mp3-player-master  File: ABasicPlayer.java View source code
//---------------------------------------------
public final void dbg() {
    System.out.println("[ AudioFileFormat = " + getAudioFileFormat());
    System.out.println("[ AudioFormat = " + getAudioFormat());
    System.out.println("[ BitRate = " + getBitRate());
    System.out.println("[ Gain = " + getGain());
    System.out.println("[ Maximum = " + getMaximum());
    System.out.println("[ Minimum = " + getMinimum());
    System.out.println("[ Pan = " + getPan());
    System.out.println("[ Precision = " + getPrecision());
    System.out.println("[ Status = " + getStatus());
}
Example 72
Project: idac-master  File: MpegAudioFileReader.java View source code
/**
	 * Returns AudioFileFormat from URL.
	 */
public AudioFileFormat getAudioFileFormat(URL url) throws UnsupportedAudioFileException, IOException {
    if (TDebug.TraceAudioFileReader) {
        TDebug.out("MpegAudioFileReader.getAudioFileFormat(URL): begin");
    }
    long lFileLengthInBytes = AudioSystem.NOT_SPECIFIED;
    URLConnection conn = url.openConnection();
    // Tell the Shoutcast server (if any) that this SPI supports Shoutcast streams.
    conn.setRequestProperty("Icy-Metadata", "1");
    InputStream inputStream = conn.getInputStream();
    AudioFileFormat audioFileFormat = null;
    try {
        audioFileFormat = getAudioFileFormat(inputStream, lFileLengthInBytes);
    } finally {
        inputStream.close();
    }
    if (TDebug.TraceAudioFileReader) {
        TDebug.out("MpegAudioFileReader.getAudioFileFormat(URL): end");
    }
    return audioFileFormat;
}
Example 73
Project: LimeWire-Pirate-Edition-master  File: LimeAudioFormat.java View source code
/**
     * Creates a map of properties about the current input stream. Unlike many input streams,
     * audio input streams have a variety of extra properties associated with them, such as
     * <pre>
     *  - frame size
     *  - sample rate
     *  - frames per second
     *  - audio type
     *  - length in # of frames
     *  - # of audio channels
     *  - etc.
     * </pre>
     * This information is often useful to the application that initiated playback; it is
     * extracted here in case another class wishes to use it.
     * 
     * @param source the audio source that the audioInputStream is created from for reading
     * @return a Map<String,Object> containing properties about the audio source
     */
private static Map<String, Object> createProperties(AudioSource source) throws UnsupportedAudioFileException, IOException {
    AudioFileFormat audioFileFormat;
    Map<String, Object> properties = new HashMap<String, Object>();
    if (source.getFile() != null) {
        audioFileFormat = AudioSystem.getAudioFileFormat(source.getFile());
    } else if (source.getStream() != null) {
        audioFileFormat = AudioSystem.getAudioFileFormat(source.getStream());
    } else
        return properties;
    if (audioFileFormat instanceof TAudioFileFormat) {
        // Tritonus SPI compliant audio file format.
        properties = GenericsUtils.scanForMap(((TAudioFileFormat) audioFileFormat).properties(), String.class, Object.class, ScanMode.REMOVE);
        // Clone the Map because it is not mutable.
        Map<String, Object> newMap = new HashMap<String, Object>(properties);
        properties = newMap;
    }
    // Add JavaSound properties.
    if (audioFileFormat.getByteLength() > 0)
        properties.put(AUDIO_LENGTH_BYTES, audioFileFormat.getByteLength());
    if (audioFileFormat.getFrameLength() > 0)
        properties.put(AUDIO_LENGTH_FRAMES, audioFileFormat.getFrameLength());
    if (audioFileFormat.getType() != null)
        properties.put(AUDIO_TYPE, (audioFileFormat.getType().toString()));
    AudioFormat audioFormat = audioFileFormat.getFormat();
    if (audioFormat.getFrameRate() > 0)
        properties.put(AUDIO_FRAMERATE_FPS, audioFormat.getFrameRate());
    if (audioFormat.getFrameSize() > 0)
        properties.put(AUDIO_FRAMESIZE_BYTES, audioFormat.getFrameSize());
    if (audioFormat.getSampleRate() > 0)
        properties.put(AUDIO_SAMPLERATE_HZ, audioFormat.getSampleRate());
    if (audioFormat.getSampleSizeInBits() > 0)
        properties.put(AUDIO_SAMPLESIZE_BITS, audioFormat.getSampleSizeInBits());
    if (audioFormat.getChannels() > 0)
        properties.put(AUDIO_CHANNELS, audioFormat.getChannels());
    if (audioFormat instanceof TAudioFormat) {
        // Tritonus SPI compliant audio format.
        Map<String, Object> addproperties = GenericsUtils.scanForMap(((TAudioFormat) audioFormat).properties(), String.class, Object.class, ScanMode.REMOVE);
        properties.putAll(addproperties);
    }
    return properties;
}
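Note: even without the Tritonus TAudioFileFormat extension, the standard AudioFileFormat.properties() map (available since Java 5) can expose similar metadata, though providers are free to leave it empty. A minimal sketch, using a hypothetical file name:

import java.io.File;
import java.util.Map;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioSystem;

public class DumpAudioProperties {
    public static void main(String[] args) throws Exception {
        AudioFileFormat fileFormat = AudioSystem.getAudioFileFormat(new File("song.wav"));
        // Provider-specific metadata; the map is unmodifiable and may be empty.
        Map<String, Object> props = fileFormat.properties();
        props.forEach((key, value) -> System.out.println(key + " = " + value));
        // The core getters work regardless of the property map.
        System.out.println("type = " + fileFormat.getType());
        System.out.println("frames = " + fileFormat.getFrameLength());
    }
}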
Example 74
Project: limewire5-ruby-master  File: LimeAudioFormat.java View source code
/**
     * Creates a map of properties about the current input stream. Unlike many input streams,
     * audio input streams have a variety of extra properties associated with them, such as
     * <pre>
     *  - frame size
     *  - sample rate
     *  - frames per second
     *  - audio type
     *  - length in # of frames
     *  - # of audio channels
     *  - etc.
     * </pre>
     * This information is often useful to the application that initiated playback; it is
     * extracted here in case another class wishes to use it.
     * 
     * @param source the audio source that the audioInputStream is created from for reading
     * @return a Map<String,Object> containing properties about the audio source
     */
private static Map<String, Object> createProperties(AudioSource source) throws UnsupportedAudioFileException, IOException {
    AudioFileFormat audioFileFormat;
    Map<String, Object> properties = new HashMap<String, Object>();
    if (source.getFile() != null) {
        audioFileFormat = AudioSystem.getAudioFileFormat(source.getFile());
    } else if (source.getStream() != null) {
        audioFileFormat = AudioSystem.getAudioFileFormat(source.getStream());
    } else
        return properties;
    if (audioFileFormat instanceof TAudioFileFormat) {
        // Tritonus SPI compliant audio file format.
        properties = GenericsUtils.scanForMap(((TAudioFileFormat) audioFileFormat).properties(), String.class, Object.class, ScanMode.REMOVE);
        // Clone the Map because it is not mutable.
        Map<String, Object> newMap = new HashMap<String, Object>(properties);
        properties = newMap;
    }
    // Add JavaSound properties.
    if (audioFileFormat.getByteLength() > 0)
        properties.put(AUDIO_LENGTH_BYTES, audioFileFormat.getByteLength());
    if (audioFileFormat.getFrameLength() > 0)
        properties.put(AUDIO_LENGTH_FRAMES, audioFileFormat.getFrameLength());
    if (audioFileFormat.getType() != null)
        properties.put(AUDIO_TYPE, (audioFileFormat.getType().toString()));
    AudioFormat audioFormat = audioFileFormat.getFormat();
    if (audioFormat.getFrameRate() > 0)
        properties.put(AUDIO_FRAMERATE_FPS, audioFormat.getFrameRate());
    if (audioFormat.getFrameSize() > 0)
        properties.put(AUDIO_FRAMESIZE_BYTES, audioFormat.getFrameSize());
    if (audioFormat.getSampleRate() > 0)
        properties.put(AUDIO_SAMPLERATE_HZ, audioFormat.getSampleRate());
    if (audioFormat.getSampleSizeInBits() > 0)
        properties.put(AUDIO_SAMPLESIZE_BITS, audioFormat.getSampleSizeInBits());
    if (audioFormat.getChannels() > 0)
        properties.put(AUDIO_CHANNELS, audioFormat.getChannels());
    if (audioFormat instanceof TAudioFormat) {
        // Tritonus SPI compliant audio format.
        Map<String, Object> addproperties = GenericsUtils.scanForMap(((TAudioFormat) audioFormat).properties(), String.class, Object.class, ScanMode.REMOVE);
        properties.putAll(addproperties);
    }
    return properties;
}
Example 75
Project: opsu-master  File: MusicController.java View source code
/**
	 * Returns the duration of the current track, in milliseconds.
	 * Currently only works for MP3s.
	 * @return the track duration in milliseconds, -1 if no track exists, or the
	 *         {@code endTime} field of the loaded beatmap as a fallback
	 * @author Tom Brito (http://stackoverflow.com/a/3056161)
	 */
public static int getDuration() {
    if (!trackExists() || lastBeatmap == null)
        return -1;
    if (duration == 0) {
        // TAudioFileFormat method only works for MP3s
        if (lastBeatmap.audioFilename.getName().endsWith(".mp3")) {
            try {
                AudioFileFormat fileFormat = AudioSystem.getAudioFileFormat(lastBeatmap.audioFilename);
                if (fileFormat instanceof TAudioFileFormat) {
                    Map<?, ?> properties = ((TAudioFileFormat) fileFormat).properties();
                    Long microseconds = (Long) properties.get("duration");
                    duration = (int) (microseconds / 1000);
                    return duration;
                }
            } catch (UnsupportedAudioFileException | IOException e) {
                // ignore and fall through to the beatmap end time fallback
            }
        }
        // fallback: use beatmap end time (often not the track duration)
        duration = lastBeatmap.endTime;
    }
    return duration;
}
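Note: when the frame length is reported (typically for PCM files such as WAV or AIFF), the duration can be derived without any SPI-specific properties. A minimal sketch of that calculation, with a hypothetical file name:

import java.io.File;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;

public class PcmDuration {
    public static void main(String[] args) throws Exception {
        AudioFileFormat fileFormat = AudioSystem.getAudioFileFormat(new File("track.wav"));
        AudioFormat format = fileFormat.getFormat();
        long frames = fileFormat.getFrameLength();   // may be AudioSystem.NOT_SPECIFIED
        float frameRate = format.getFrameRate();
        if (frames > 0 && frameRate > 0) {
            // milliseconds = frames / (frames per second) * 1000
            System.out.println("duration: " + (int) (frames / frameRate * 1000) + " ms");
        }
    }
}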
Example 76
Project: ptii-master  File: SoundPlayback.java View source code
private void _stopPlaybackToFile() throws IOException {
    int size = _toFileBuffer.size();
    byte[] audioBytes = new byte[size];
    for (int i = 0; i < size; i++) {
        Byte j = (Byte) _toFileBuffer.get(i);
        audioBytes[i] = j.byteValue();
    }
    ByteArrayInputStream byteInputArrayStream = null;
    AudioInputStream audioInputStream = null;
    try {
        byteInputArrayStream = new ByteArrayInputStream(audioBytes);
        audioInputStream = new AudioInputStream(byteInputArrayStream, _playToFileFormat, audioBytes.length / _frameSizeInBytes);
        File outFile = new File(_fileName);
        StringTokenizer st = new StringTokenizer(_fileName, ".");
        // Do error checking:
        if (st.countTokens() != 2) {
            throw new IOException("Error: Incorrect " + "file name format. " + "Format: filename.extension");
        }
        // Advance to the file extension.
        st.nextToken();
        String fileExtension = st.nextToken();
        if (fileExtension.equalsIgnoreCase("au")) {
            // Save the file.
            AudioSystem.write(audioInputStream, AudioFileFormat.Type.AU, outFile);
        } else if (fileExtension.equalsIgnoreCase("aiff")) {
            // Save the file.
            AudioSystem.write(audioInputStream, AudioFileFormat.Type.AIFF, outFile);
        } else if (fileExtension.equalsIgnoreCase("wave")) {
            // Save the file.
            AudioSystem.write(audioInputStream, AudioFileFormat.Type.WAVE, outFile);
        } else if (fileExtension.equalsIgnoreCase("wav")) {
            // Save the file.
            AudioSystem.write(audioInputStream, AudioFileFormat.Type.WAVE, outFile);
        } else if (fileExtension.equalsIgnoreCase("aifc")) {
            // Save the file.
            AudioSystem.write(audioInputStream, AudioFileFormat.Type.AIFC, outFile);
        } else {
            throw new IOException("Error saving " + "file: Unknown file format: " + fileExtension);
        }
    } catch (IOException e) {
        throw new IOException("SoundPlayback: error saving" + " file: " + e);
    } finally {
        if (byteInputArrayStream != null) {
            try {
                byteInputArrayStream.close();
            } catch (Throwable throwable) {
                System.out.println("Ignoring failure to close stream " + "on " + audioBytes.length + " bytes of data.");
                throwable.printStackTrace();
            }
        }
        if (audioInputStream != null) {
            try {
                audioInputStream.close();
            } catch (Throwable throwable) {
                System.out.println("Ignoring failure to close stream " + "on " + audioBytes.length + " bytes of data.");
                throwable.printStackTrace();
            }
        }
    }
}
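Note: the extension-to-type dispatch above could also be written as a lookup table, which keeps the save logic to a single AudioSystem.write call. A behavior-equivalent sketch for the extensions handled here (Map.of requires Java 9+; the class name is made up):

import java.io.IOException;
import java.util.Map;
import javax.sound.sampled.AudioFileFormat;

final class FileTypeByExtension {
    // Lower-case extensions mapped to their audio file types.
    private static final Map<String, AudioFileFormat.Type> TYPES = Map.of(
            "au", AudioFileFormat.Type.AU,
            "aiff", AudioFileFormat.Type.AIFF,
            "aifc", AudioFileFormat.Type.AIFC,
            "wav", AudioFileFormat.Type.WAVE,
            "wave", AudioFileFormat.Type.WAVE);

    static AudioFileFormat.Type forExtension(String extension) throws IOException {
        AudioFileFormat.Type type = TYPES.get(extension.toLowerCase());
        if (type == null) {
            throw new IOException("Error saving file: Unknown file format: " + extension);
        }
        return type;
    }
}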
Example 77
Project: Fudan-Sakai-master  File: AudioRecorder.java View source code
/**
	 * Post audio data directly.
	 * 
	 * @param audioType
	 *            the audio type string
	 * @param urlString
	 *            the url (in applets must use getCodeBase().toString() +
	 *            same-host relative url)
	 * @param inputStream
	 *            the input stream
	 * @param attemptsLeft
	 *            attempts left
	 */
public void saveAndPost(InputStream inputStream, final AudioFileFormat.Type audioType, final String urlString, int attemptsLeft, final boolean post) {
    Thread saveAndPostThread = new Thread() {

        public void run() {
            while (audioInputStream == null) {
                try {
                    // politely waiting for capture Thread to finish with audioInputStream.
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
            // reset to the beginning of the captured data
            try {
                audioInputStream.reset();
            } catch (Exception ex) {
                reportStatus(res.getString("Unable_to_reset") + ex);
                return;
            }
            if (post)
                postAudio(audioType, urlString);
            if (containingApplet != null) {
                JSObject window = (JSObject) JSObject.getWindow(containingApplet);
                JSObject opener = (JSObject) window.getMember("opener");
                opener.call("clickReloadLink", new Object[] { window });
                window.call("close", null);
            }
        }
    };
    // end of saveAndPostThread
    saveAndPostThread.start();
}
Example 78
Project: krut-master  File: Sampler.java View source code
/** This method sets up all the input streams and output streams
     *  that are used in the recording. This method should be called every
     *  time a parameter affecting those streams has been changed
     *  (eg. sampling frequency). This method does not have to be called
     *  for just a change in save file name, since a temporary buffer file
     *  is used for output during recording.
     *  The method is called by the run method
     *  once every time at the start of recording.
     */
public void init() {
    /*  First of all, make sure the line isn't already running. */
    if (m_line != null)
        stopRecording();
    /*  Setting the AudioFormat */
    sampleSizeInBytes = (int) sampleSize * channels / 8;
    audioFormat = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED, frequency, sampleSize, channels, sampleSizeInBytes, frequency, false);
    /** Trying to get a TargetDataLine. The TargetDataLine
         *  is used later to read audio data from.
         *  If requesting the line was successful, it is opened.
         */
    /** Try to get a buffer of 1s. */
    DataLine.Info info = new DataLine.Info(TargetDataLine.class, audioFormat, (int) (frequency * sampleSizeInBytes));
    TargetDataLine targetDataLine = null;
    try {
        targetDataLine = (TargetDataLine) AudioSystem.getLine(info);
        /** Use the maximum buffersize available. */
        targetDataLine.open(audioFormat, info.getMaxBufferSize());
    } catch (LineUnavailableException e) {
        System.out.println("unable to get a recording line");
        e.printStackTrace();
    }
    /** Set the audio file type. */
    AudioFileFormat.Type targetType = AudioFileFormat.Type.WAVE;
    m_line = targetDataLine;
    m_targetType = targetType;
    bufferSize = m_line.getBufferSize();
    bufferSizeInFrames = bufferSize / sampleSizeInBytes;
    /** This is how many samples of lag will be tolerated compared
         *  to the system clock, before the Sampler compensates
         *  by "stretching" the sample currently in memory
         *  to get back into sync. Stretching is done in the
         *  hiTechFill method.
         */
    acceptedLag = (int) frequency / maxLag;
    /** This is how many samples of "speeding" will be tolerated compared
         *  to the system clock, before the Sampler compensates
         *  by dropping part of the sample currently in memory
         *  to get back into sync. Dropping is done in 
         *  hiTechFill method.
         */
    acceptedAhead = (int) frequency / maxAhead;
    /** Data is where the sampled data will end up being read
         *  into from the sample buffer. Reading is done in the
         *  run method.
         */
    data = new byte[bufferSize];
    /** Opening an output file, with buffer size
         *  memoryBufferSize, to write audio data into.
         *  This is not the final wav file, just temporary
         *  storage.
         */
    try {
        m_outputFile = new File(bufferFileName);
        while (m_outputFile.exists() && !m_outputFile.delete()) m_outputFile = mySaveQuery.getNextFile(m_outputFile);
        FileOutputStream outFileStream = new FileOutputStream(m_outputFile);
        audioOutStream = new BufferedOutputStream(outFileStream, memoryBufferSize);
    } catch (FileNotFoundException fe) {
        System.err.println(fe);
    } catch (OutOfMemoryError oe) {
        System.err.println(oe);
    }
}
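Note: when the temporary buffer file and lag compensation are not needed, the capture pipeline collapses to a few lines, because AudioSystem.write can consume an AudioInputStream wrapped directly around the TargetDataLine. A minimal sketch; the five-second cutoff and output name are arbitrary:

import java.io.File;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.TargetDataLine;

public class SimpleRecorder {
    public static void main(String[] args) throws Exception {
        AudioFormat format = new AudioFormat(44100f, 16, 2, true, false);
        TargetDataLine line = AudioSystem.getTargetDataLine(format);
        line.open(format);
        line.start();
        // Close the line from a helper thread after five seconds of capture.
        new Thread(() -> {
            try { Thread.sleep(5000); } catch (InterruptedException ignored) { }
            line.stop();
            line.close();
        }).start();
        // Blocks until the line is closed, then fixes up the WAV header.
        AudioSystem.write(new AudioInputStream(line), AudioFileFormat.Type.WAVE, new File("capture.wav"));
    }
}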
Example 79
Project: Minim-master  File: MpegAudioFileReader.java View source code
/**
	 * Returns AudioFileFormat from URL.
	 */
public AudioFileFormat getAudioFileFormat(URL url) throws UnsupportedAudioFileException, IOException {
    if (TDebug.TraceAudioFileReader) {
        TDebug.out("MpegAudioFileReader.getAudioFileFormat(URL): begin");
    }
    long lFileLengthInBytes = AudioSystem.NOT_SPECIFIED;
    URLConnection conn = url.openConnection();
    // Tell the Shoutcast server (if any) that this SPI supports Shoutcast streams.
    conn.setRequestProperty("Icy-Metadata", "1");
    InputStream inputStream = conn.getInputStream();
    AudioFileFormat audioFileFormat = null;
    try {
        audioFileFormat = getAudioFileFormat(inputStream, lFileLengthInBytes);
    } finally {
        inputStream.close();
    }
    if (TDebug.TraceAudioFileReader) {
        TDebug.out("MpegAudioFileReader.getAudioFileFormat(URL): end");
    }
    return audioFileFormat;
}
Example 80
Project: musicuri-master  File: Toolset.java View source code
/**
	 * Extracts/encodes the AudioSignatureDS for a given audio file
	 * @param file the audio file to encode 
	 * @return a string containing the whole XML-formatted MPEG-7 description document
	 */
public static String createMPEG7Description(File file) throws IOException {
    if (isSupportedAudioFile(file)) {
        System.out.println("Extracting Query Audio Signature");
        String xmlString = null;
        Config configuration = new ConfigDefault();
        configuration.enableAll(false);
        configuration.setValue("AudioSignature", "enable", true);
        configuration.setValue("AudioSignature", "decimation", 32);
        //System.out.println("File: " + file.getName());
        AudioInputStream ais = null;
        try {
            ais = AudioSystem.getAudioInputStream(file);
            AudioFormat f = ais.getFormat();
            if (f.getEncoding() != AudioFormat.Encoding.PCM_SIGNED) {
                System.out.println("Converting Audio stream format");
                ais = AudioSystem.getAudioInputStream(AudioFormat.Encoding.PCM_SIGNED, ais);
                f = ais.getFormat();
            }
            String workingDir = getCWD();
            String tempFilename = workingDir + "/temp.wav";
            AudioSystem.write(ais, AudioFileFormat.Type.WAVE, new File(tempFilename));
            File tmpFile = new File(tempFilename);
            AudioInFloatSampled audioin = new AudioInFloatSampled(tmpFile);
            String str = tmpFile.getCanonicalPath();
            String[] ar = { str };
            //xmlString = Encoder.fromWAVtoXML(ar);
            // gather information about audio file
            MP7MediaInformation media_info = new MP7MediaInformation();
            media_info.setFileSize(tmpFile.length());
            AudioFormat format = audioin.getSourceFormat();
            media_info.setSample(format.getSampleRate(), format.getSampleSizeInBits());
            media_info.setNumberOfChannels(audioin.isMono() ? 1 : 2);
            // create mpeg-7 writer
            MP7Writer mp7writer = new MP7Writer();
            mp7writer.setMediaInformation(media_info);
            // create encoder
            Encoder encoder = null;
            Config config = new ConfigDefault();
            config.enableAll(false);
            config.setValue("AudioSignature", "enable", true);
            config.setValue("AudioSignature", "decimation", 32);
            encoder = new Encoder(audioin.getSampleRate(), mp7writer, config);
            //encoder.addTimeElapsedListener(new Ticker(System.err));
            // copy audio signal from source to encoder
            long oldtime = System.currentTimeMillis();
            float[] audio;
            while ((audio = audioin.get()) != null) {
                if (!audioin.isMono())
                    audio = AudioInFloat.getMono(audio);
                encoder.put(audio);
            }
            encoder.flush();
            System.out.println("Extraction Time     : " + (System.currentTimeMillis() - oldtime) + " ms");
            // whole MPEG-7 description into a string
            xmlString = mp7writer.toString();
        //System.out.println( xmlString )
        } catch (Exception e) {
            e.printStackTrace(System.err);
        } finally {
            if (ais != null) {
                ais.close();
            }
        }
        return xmlString;
    } else {
        System.out.println("Unsupported audio file format");
        return null;
    }
}
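Note: the decode-to-PCM-then-write-WAV step in the middle of this method is a reusable pattern on its own. A minimal sketch of it in isolation; the class and method names are made up for illustration:

import java.io.File;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

final class PcmWavConverter {
    /** Decodes any readable audio file to signed PCM and saves it as a WAV file. */
    static void toPcmWav(File source, File target) throws Exception {
        AudioInputStream in = AudioSystem.getAudioInputStream(source);
        try {
            if (!AudioFormat.Encoding.PCM_SIGNED.equals(in.getFormat().getEncoding())) {
                // Let an installed codec (e.g. an MP3 SPI) decode the stream to PCM.
                in = AudioSystem.getAudioInputStream(AudioFormat.Encoding.PCM_SIGNED, in);
            }
            AudioSystem.write(in, AudioFileFormat.Type.WAVE, target);
        } finally {
            in.close();
        }
    }
}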
Example 81
Project: rhythos-master  File: MpegAudioFileReader.java View source code
/**
     * Returns AudioFileFormat from URL.
     */
public AudioFileFormat getAudioFileFormat(URL url) throws UnsupportedAudioFileException, IOException {
    if (TDebug.TraceAudioFileReader) {
        TDebug.out("MpegAudioFileReader.getAudioFileFormat(URL): begin");
    }
    long lFileLengthInBytes = AudioSystem.NOT_SPECIFIED;
    URLConnection conn = url.openConnection();
    // Tell the Shoutcast server (if any) that this SPI supports Shoutcast streams.
    conn.setRequestProperty("Icy-Metadata", "1");
    InputStream inputStream = conn.getInputStream();
    AudioFileFormat audioFileFormat = null;
    try {
        audioFileFormat = getAudioFileFormat(inputStream, lFileLengthInBytes);
    } finally {
        inputStream.close();
    }
    if (TDebug.TraceAudioFileReader) {
        TDebug.out("MpegAudioFileReader.getAudioFileFormat(URL): end");
    }
    return audioFileFormat;
}
Example 82
Project: tiny-codec-master  File: TrackCutterConfiguration.java View source code
public void setTargetType(final AudioFileFormat.Type targetType) {
    TrackCutterConfiguration.logger.entering(TrackCutterConfiguration.class.getCanonicalName(), "setTargetType(AudioFileFormat.Type)", targetType);
    this.targetType = targetType;
    TrackCutterConfiguration.logger.exiting(TrackCutterConfiguration.class.getCanonicalName(), "setTargetType(AudioFileFormat.Type)");
}
Example 83
Project: JDK-master  File: AudioFileWriter.java View source code
/**
     * Indicates whether file writing support for the specified file type is provided
     * by this audio file writer.
     * @param fileType the file type for which write capabilities are queried
     * @return <code>true</code> if the file type is supported,
     * otherwise <code>false</code>
     */
public boolean isFileTypeSupported(AudioFileFormat.Type fileType) {
    AudioFileFormat.Type[] types = getAudioFileTypes();
    for (int i = 0; i < types.length; i++) {
        if (fileType.equals(types[i])) {
            return true;
        }
    }
    return false;
}
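Note: callers normally use the AudioSystem facade, which runs this same scan over every registered writer. For example:

import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioSystem;

public class CheckFileTypeSupport {
    public static void main(String[] args) {
        // True if any installed writer can produce AIFF files.
        System.out.println("AIFF supported: "
                + AudioSystem.isFileTypeSupported(AudioFileFormat.Type.AIFF));
    }
}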
Example 84
Project: Minim-Android-master  File: AudioFileWriter.java View source code
/**
	 * Return true if the indicated type is supported by this provider.
	 * 
	 * @param type
	 *            the audio file format type
	 */
public boolean isFileTypeSupported(AudioFileFormat.Type type) {
    AudioFileFormat.Type[] types = getAudioFileTypes();
    for (int i = 0; i < types.length; ++i) {
        if (type.equals(types[i]))
            return true;
    }
    return false;
}
Example 85
Project: myrobotlab-master  File: AudioCapture.java View source code
public void save(String filename) throws IOException {
    File file = new File(filename);
    AudioSystem.write(audioInputStream, AudioFileFormat.Type.WAVE, file);
}
Example 86
Project: property-db-master  File: AudioFileWriter.java View source code
/** {@collect.stats}
     * Indicates whether file writing support for the specified file type is provided
     * by this audio file writer.
     * @param fileType the file type for which write capabilities are queried
     * @return <code>true</code> if the file type is supported,
     * otherwise <code>false</code>
     */
public boolean isFileTypeSupported(AudioFileFormat.Type fileType) {
    AudioFileFormat.Type[] types = getAudioFileTypes();
    for (int i = 0; i < types.length; i++) {
        if (fileType.equals(types[i])) {
            return true;
        }
    }
    return false;
}
Example 87
Project: speechalyzer-master  File: AudioUtil.java View source code
/**
	 * Write a byte array of raw audio data to a file, adding a WAV header.
	 * 
	 * @param data
	 *            The byte array.
	 * @param format
	 *            The audioformat.
	 * @param fn
	 *            The filename.
	 * @throws Exception
	 */
public static void writeAudioToWavFile(byte[] data, AudioFormat format, String fn) throws Exception {
    // Note: the AudioInputStream length parameter is in sample frames, not bytes.
    AudioInputStream ais = new AudioInputStream(new ByteArrayInputStream(data), format, data.length / format.getFrameSize());
    AudioSystem.write(ais, AudioFileFormat.Type.WAVE, new File(fn));
}
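Note: a usage sketch for this helper, assuming the AudioUtil class above is on the classpath: synthesize one second of a 440 Hz sine tone as little-endian 16-bit mono PCM and write it out (file name and pitch are arbitrary):

import javax.sound.sampled.AudioFormat;

public class WriteToneDemo {
    public static void main(String[] args) throws Exception {
        float sampleRate = 44100f;
        // Signed 16-bit, mono, little-endian PCM.
        AudioFormat format = new AudioFormat(sampleRate, 16, 1, true, false);
        byte[] data = new byte[(int) sampleRate * 2]; // one second, two bytes per sample
        for (int i = 0; i < data.length / 2; i++) {
            short sample = (short) (Math.sin(2 * Math.PI * 440 * i / sampleRate) * Short.MAX_VALUE * 0.5);
            data[2 * i] = (byte) (sample & 0xff);            // low byte first
            data[2 * i + 1] = (byte) ((sample >> 8) & 0xff); // then high byte
        }
        AudioUtil.writeAudioToWavFile(data, format, "tone.wav");
    }
}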
Example 88
Project: learning-bittorrent-master  File: BasicPlayer.java View source code
/**
	 * Returns the source AudioFileFormat, or null if none is available.
	 */
public AudioFileFormat getAudioFileFormat() {
    return m_audioFileFormat;
}