package haven.geoloc;

import java.awt.Graphics2D;
import java.awt.color.ColorSpace;
import java.awt.image.BufferedImage;
import java.awt.image.ColorConvertOp;

/*
 * Based on Elliot Shepherd's implementation of Neal Krawetz's perceptual hashing algorithm.
 * http://www.hackerfactor.com/blog/index.php?/archives/432-Looks-Like-It.html
 */
public class PHash {
    private int size = 32;
    private int smallerSize = 8;
    private ColorConvertOp colorConvert =
            new ColorConvertOp(ColorSpace.getInstance(ColorSpace.CS_GRAY), null);

    public PHash(int size, int smallerSize) {
        this.size = size;
        this.smallerSize = smallerSize;
        initCoefficients();
    }

    public long getHash(BufferedImage img) {
        /*
         * Reduce size. Like Average Hash, pHash starts with a small image. However, the
         * image is larger than 8x8; 32x32 is a good size. This is really done to simplify
         * the DCT computation and not because it is needed to reduce the high frequencies.
         */
        img = resize(img, size, size);

        /*
         * Reduce color. The image is reduced to grayscale just to further simplify the
         * number of computations.
         */
        img = grayscale(img);

        double[][] vals = new double[size][size];
        for (int x = 0; x < img.getWidth(); x++) {
            for (int y = 0; y < img.getHeight(); y++) {
                /* After the grayscale conversion R == G == B, so reading the green
                 * channel yields the luminance value. */
                vals[x][y] = (img.getRGB(x, y) & 0x0000ff00) >> 0x08;
            }
        }

        /*
         * Compute the DCT. The DCT separates the image into a collection of frequencies
         * and scalars. While JPEG uses an 8x8 DCT, this algorithm uses a 32x32 DCT.
         */
        double[][] dctVals = dct(vals);

        /*
         * Reduce the DCT. While the DCT is 32x32, just keep the top-left 8x8. Those
         * represent the lowest frequencies in the picture.
         *
         * Compute the average value. Like the Average Hash, compute the mean DCT value
         * (using only the 8x8 DCT low-frequency values and excluding the first term,
         * since the DC coefficient can be significantly different from the other values
         * and will throw off the average).
         */
        double total = 0;
        for (int x = 0; x < smallerSize; x++) {
            for (int y = 0; y < smallerSize; y++) {
                total += dctVals[x][y];
            }
        }
        total -= dctVals[0][0];
        double avg = total / (double) ((smallerSize * smallerSize) - 1);

        /*
         * Further reduce the DCT. This is the magic step. Set the 64 hash bits to 0 or 1
         * depending on whether each of the 64 DCT values is above or below the average
         * value. The result doesn't tell us the actual low frequencies; it just tells us
         * the very-rough relative scale of the frequencies to the mean. The result will
         * not vary as long as the overall structure of the image remains the same; this
         * can survive gamma and color histogram adjustments without a problem.
         */
        /* Note: the x != 0 && y != 0 condition skips the entire first row and column of
         * the 8x8 block, so only 49 of the 64 bit positions are actually used, whereas
         * the description above excludes only the DC term from the average. This is left
         * unchanged here, since altering it would change every hash value. */
        long hash = 0L;
        int i = 0;
        for (int x = 0; x < smallerSize; x++) {
            for (int y = 0; y < smallerSize; y++) {
                if (x != 0 && y != 0) {
                    if (dctVals[x][y] > avg)
                        hash ^= 1L << (63 - i);
                    i++;
                }
            }
        }
        return hash;
    }

    private BufferedImage resize(BufferedImage img, int width, int height) {
        BufferedImage resizedImage = new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB);
        Graphics2D g = resizedImage.createGraphics();
        g.drawImage(img, 0, 0, width, height, null);
        g.dispose();
        return resizedImage;
    }

    private BufferedImage grayscale(BufferedImage img) {
        colorConvert.filter(img, img);
        return img;
    }

    // DCT function adapted from
    // http://stackoverflow.com/questions/4240490/problems-with-dct-and-idct-algorithm-in-java
    private double[] c;

    private void initCoefficients() {
        c = new double[size];
        for (int i = 1; i < size; i++) {
            c[i] = 1;
        }
        c[0] = 1 / Math.sqrt(2.0);
    }

    /*
     * 2D DCT-II:
     *   F(u,v) = (c(u) * c(v) / 4) * sum_{i,j} f(i,j)
     *            * cos((2i+1) * u * pi / (2N)) * cos((2j+1) * v * pi / (2N))
     * with c(0) = 1/sqrt(2) and c(k) = 1 otherwise. The 1/4 scale factor matches the
     * 8x8 DCT in the source answer; a uniform scale factor is harmless here, since the
     * hash only compares each value against the mean of the same values.
     */
    private double[][] dct(double[][] f) {
        int N = size;
        double[][] F = new double[N][N];
        for (int u = 0; u < N; u++) {
            for (int v = 0; v < N; v++) {
                double sum = 0.0;
                for (int i = 0; i < N; i++) {
                    for (int j = 0; j < N; j++) {
                        sum += Math.cos(((2 * i + 1) / (2.0 * N)) * u * Math.PI)
                                * Math.cos(((2 * j + 1) / (2.0 * N)) * v * Math.PI)
                                * f[i][j];
                    }
                }
                sum *= (c[u] * c[v]) / 4.0;
                F[u][v] = sum;
            }
        }
        return F;
    }
}
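
/*
 * Minimal usage sketch: perceptual hashes produced by PHash are typically compared by
 * Hamming distance (the number of differing bits), with smaller distances meaning more
 * similar images, as described in the article linked above. The image paths and the
 * distance threshold below are illustrative assumptions, not values taken from this
 * project.
 */
class PHashExample {
    public static void main(String[] args) throws java.io.IOException {
        PHash hasher = new PHash(32, 8);

        // Hypothetical input images; any two BufferedImages work here.
        BufferedImage a = javax.imageio.ImageIO.read(new java.io.File("a.png"));
        BufferedImage b = javax.imageio.ImageIO.read(new java.io.File("b.png"));

        long hashA = hasher.getHash(a);
        long hashB = hasher.getHash(b);

        // Hamming distance between the two 64-bit hashes.
        int distance = Long.bitCount(hashA ^ hashB);
        System.out.println("Hamming distance: " + distance);

        // A small threshold (assumed value) treats the images as perceptually similar.
        if (distance <= 5)
            System.out.println("Images look similar");
    }
}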