Java Examples for org.apache.lucene.analysis.standard.ClassicAnalyzer
The following Java examples show how org.apache.lucene.analysis.standard.ClassicAnalyzer is used in practice. The source code samples are taken from a range of open source projects.
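Before the project samples, here is a minimal, self-contained sketch of how ClassicAnalyzer tokenizes a piece of text. It assumes a Lucene 5.x or later release, where the no-argument constructor is available (older releases, as several examples below show, take a Version argument); the field name "content" and the sample text are illustrative only.
import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.ClassicAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class ClassicAnalyzerDemo {
    public static void main(String[] args) throws IOException {
        // ClassicAnalyzer lower-cases tokens, removes English stop words, and
        // keeps e-mail addresses and internet host names as single tokens.
        try (ClassicAnalyzer analyzer = new ClassicAnalyzer();
             TokenStream stream = analyzer.tokenStream("content",
                     "Contact support@example.com about Lucene")) {
            CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
            stream.reset();
            while (stream.incrementToken()) {
                // Expected output: "contact", "support@example.com", "lucene"
                // ("about" is dropped as a stop word).
                System.out.println(term.toString());
            }
            stream.end();
        }
    }
}
The same pattern (construct the analyzer, obtain a TokenStream, iterate its tokens) underlies the indexing and query-parsing uses shown in the project examples that follow.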
Example 1
| Project: imixs-workflow-master | File: LuceneSearchService.java |
public static String normalizeSearchTerm(String searchTerm) throws QueryException {
    if (searchTerm == null) {
        return "";
    }
    if (searchTerm.trim().isEmpty()) {
        return "";
    }
    ClassicAnalyzer analyzer = new ClassicAnalyzer();
    QueryParser parser = new QueryParser("content", analyzer);
    try {
        Query result = parser.parse(escapeSearchTerm(searchTerm, false));
        searchTerm = result.toString("content");
    } catch (ParseException e) {
        logger.warning("Unable to normalize searchTerm '" + searchTerm + "' -> " + e.getMessage());
        throw new QueryException(QueryException.QUERY_NOT_UNDERSTANDABLE, e.getMessage(), e);
    }
    return escapeSearchTerm(searchTerm, true);
}
Example 2
| Project: geode-master | File: LuceneIndexXmlParserIntegrationJUnitTest.java |
@Test
public void parseIndexWithAnalyzers() throws FileNotFoundException {
    RegionCreation region = createRegionCreation("region");
    // Validate expected indexes
    Map<String, String[]> expectedIndexes = new HashMap<String, String[]>();
    expectedIndexes.put("index", new String[] { "a", "b", "c" });
    validateExpectedIndexes(region, expectedIndexes);
    // Validate expected analyzers
    Map<String, Map<String, Class<? extends Analyzer>>> expectedIndexAnalyzers = new HashMap<>();
    Map<String, Class<? extends Analyzer>> expectedFieldAnalyzers = new HashMap<>();
    expectedFieldAnalyzers.put("a", KeywordAnalyzer.class);
    expectedFieldAnalyzers.put("b", SimpleAnalyzer.class);
    expectedFieldAnalyzers.put("c", ClassicAnalyzer.class);
    expectedIndexAnalyzers.put("index", expectedFieldAnalyzers);
    validateExpectedAnalyzers(region, expectedIndexAnalyzers);
}
Example 3
| Project: lucene-solr-master | File: TestQualityRun.java |
// use benchmark logic to create the mini Reuters index
private void createReutersIndex() throws Exception {
    // 1. alg definition
    String algLines[] = {
        "# ----- properties ",
        "content.source=org.apache.lucene.benchmark.byTask.feeds.LineDocSource",
        "analyzer=org.apache.lucene.analysis.standard.ClassicAnalyzer",
        "docs.file=" + getWorkDirResourcePath("reuters.578.lines.txt.bz2"),
        "content.source.log.step=2500",
        "doc.term.vector=false",
        "content.source.forever=false",
        "directory=FSDirectory",
        "doc.stored=true",
        "doc.tokenized=true",
        "# ----- alg ",
        "ResetSystemErase",
        "CreateIndex",
        "{ AddDoc } : *",
        "CloseIndex"
    };
    // 2. execute the algorithm (required in every "logic" test)
    execBenchmark(algLines);
}
Example 4
| Project: heliosearch-master | File: TestClassicAnalyzer.java |
public void testMaxTermLength2() throws Exception {
    ClassicAnalyzer sa = new ClassicAnalyzer(TEST_VERSION_CURRENT);
    assertAnalyzesTo(sa, "ab cd toolong xy z", new String[] { "ab", "cd", "toolong", "xy", "z" });
    sa.setMaxTokenLength(5);
    assertAnalyzesTo(sa, "ab cd toolong xy z", new String[] { "ab", "cd", "xy", "z" }, new int[] { 1, 1, 2, 1 });
}
Example 5
| Project: constellation-master | File: ClassicAnalyzerTest.java |
@BeforeClass
public static void setUpClass() throws Exception {
    FileUtilities.deleteDirectory(configDirectory);
    List<Object> object = fillTestData();
    GenericIndexer indexer = new GenericIndexer(object, null, configDirectory, "", new ClassicAnalyzer(Version.LATEST), Level.FINER, true);
    indexer.destroy();
    indexSearcher = new LuceneIndexSearcher(configDirectory, "", new ClassicAnalyzer(Version.LATEST), true);
    indexSearcher.setLogLevel(Level.FINER);
}
Example 6
| Project: solr-analytics-master | File: TestClassicAnalyzer.java |
public void testMaxTermLength2() throws Exception {
    ClassicAnalyzer sa = new ClassicAnalyzer(TEST_VERSION_CURRENT);
    assertAnalyzesTo(sa, "ab cd toolong xy z", new String[] { "ab", "cd", "toolong", "xy", "z" });
    sa.setMaxTokenLength(5);
    assertAnalyzesTo(sa, "ab cd toolong xy z", new String[] { "ab", "cd", "xy", "z" }, new int[] { 1, 1, 2, 1 });
}
Example 7
| Project: pylucene-master | File: TestClassicAnalyzer.java |
public void testMaxTermLength2() throws Exception {
    ClassicAnalyzer sa = new ClassicAnalyzer(TEST_VERSION_CURRENT);
    assertAnalyzesTo(sa, "ab cd toolong xy z", new String[] { "ab", "cd", "toolong", "xy", "z" });
    sa.setMaxTokenLength(5);
    assertAnalyzesTo(sa, "ab cd toolong xy z", new String[] { "ab", "cd", "xy", "z" }, new int[] { 1, 1, 2, 1 });
}
Example 8
| Project: stratio-cassandra-master | File: PreBuiltAnalyzersTest.java |
@Test
public void testGetClassic() {
    Analyzer analyzer = PreBuiltAnalyzers.CLASSIC.get();
    Assert.assertEquals(ClassicAnalyzer.class, analyzer.getClass());
}
Example 9
| Project: cassandra-lucene-index-master | File: StandardAnalyzers.java |
@Override
protected Analyzer build() {
    return new ClassicAnalyzer();
}
Example 10
| Project: couchdb-lucene-master | File: Analyzers.java |
@Override
public Analyzer newAnalyzer(final String args) {
    return new ClassicAnalyzer();
}
Example 11
| Project: infoglue-master | File: LuceneUsersController.java |
private Analyzer getAnalyzer() throws Exception {
    return new ClassicAnalyzer(Version.LUCENE_34);
    //return new KeywordAnalyzer();
    //return new WhitespaceAnalyzer(Version.LUCENE_34);
    //return new StandardAnalyzer(Version.LUCENE_34);
}
Example 12
| Project: elasticsearch-master | File: PreBuiltAnalyzers.java |
@Override
protected Analyzer create(Version version) {
    Analyzer a = new ClassicAnalyzer();
    a.setVersion(version.luceneVersion);
    return a;
}
Example 13
| Project: elassandra-master | File: PreBuiltAnalyzers.java |
@Override
protected Analyzer create(Version version) {
    Analyzer a = new ClassicAnalyzer();
    a.setVersion(version.luceneVersion);
    return a;
}