Java Code Examples for org.apache.lucene.search.Query

The following code examples are extracted from open source projects.
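Before the examples, here is a minimal sketch of the typical life cycle of a Query with the Lucene 3.x API most of the snippets below use: parse a query string with QueryParser, run it against an IndexSearcher, and resolve each hit to its stored document. The class name, the "contents" field and the printHits helper are made up for illustration.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Version;

public class QueryUsageSketch {

  /** Parse a query string against the hypothetical "contents" field and print the top ten hits. */
  public static void printHits(Directory directory, String queryString) throws Exception {
    QueryParser parser = new QueryParser(Version.LUCENE_30, "contents", new StandardAnalyzer(Version.LUCENE_30));
    Query query = parser.parse(queryString);
    IndexSearcher searcher = new IndexSearcher(directory);
    try {
      TopDocs topDocs = searcher.search(query, 10);
      for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
        Document doc = searcher.doc(scoreDoc.doc);
        System.out.println(doc.get("contents") + " (score=" + scoreDoc.score + ")");
      }
    } finally {
      searcher.close();
    }
  }
}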

Example 1

From project lor-jamwiki, under directory /jamwiki-web/src/main/java/org/jamwiki/search/.

Source file: LuceneSearchEngine.java


/** 
 * Given the search text, searcher object, and query analyzer, generate an appropriate Lucene search query.
 */
protected Query createSearchQuery(IndexSearcher searcher,StandardAnalyzer analyzer,String text,List<Integer> namespaces) throws IOException, ParseException {
  BooleanQuery fullQuery=new BooleanQuery();
  QueryParser qp;
  if (namespaces != null && !namespaces.isEmpty()) {
    qp=new QueryParser(USE_LUCENE_VERSION,FIELD_TOPIC_NAMESPACE,analyzer);
    StringBuilder namespaceText=new StringBuilder();
    for (    Integer namespaceId : namespaces) {
      if (namespaceText.length() != 0) {
        namespaceText.append(" ").append(QueryParser.Operator.OR).append(" ");
      }
      namespaceText.append(namespaceId);
    }
    fullQuery.add(qp.parse(namespaceText.toString()),Occur.MUST);
  }
  BooleanQuery nameAndContentQuery=new BooleanQuery();
  qp=new QueryParser(USE_LUCENE_VERSION,FIELD_TOPIC_NAME_ANALYZED,analyzer);
  nameAndContentQuery.add(qp.parse(text),Occur.SHOULD);
  qp=new QueryParser(USE_LUCENE_VERSION,FIELD_TOPIC_CONTENT,analyzer);
  nameAndContentQuery.add(qp.parse(text),Occur.SHOULD);
  Query subQuery=searcher.rewrite(nameAndContentQuery);
  fullQuery.add(subQuery,Occur.MUST);
  return fullQuery;
}
 

Example 2

From project capedwarf-blue, under directory /prospectivesearch/src/main/java/org/jboss/capedwarf/prospectivesearch/.

Source file: CapedwarfProspectiveSearchService.java


public List<Subscription> listSubscriptions(String topic,String subIdStart,int maxResults,long expiresBefore){
  Query luceneQuery=newQueryBuilder().keyword().onField("topic").matching(topic).createQuery();
  CacheQuery query=getCacheQuery(luceneQuery);
  List<Object> results=query.list();
  List<Subscription> list=new ArrayList<Subscription>(results.size());
  for (  Object o : results) {
    SubscriptionHolder holder=(SubscriptionHolder)o;
    list.add(holder.toSubscription());
  }
  return list;
}
 

Example 3

From project eclipse-instasearch, under directory /instasearch/src/it/unibz/instasearch/indexing/.

Source file: Searcher.java


/** 
 * @param searchQuery
 * @return
 * @throws ParseException 
 */
private Query createExactQuery(SearchQuery searchQuery) throws ParseException {
  Query query=parserSearchString(searchQuery.getSearchString(),new KeywordAnalyzer());
  VisitableQuery visitableQuery=new VisitableQuery(query);
  visitableQuery.accept(uppercaseNameExpander);
  visitableQuery.accept(fileNameSearcher);
  query=visitableQuery.getQuery();
  return query;
}
 

Example 4

From project gast-lib, under directory /app/src/root/gast/playground/speech/food/lucene/.

Source file: FoodSearcher.java


public List<Food> findMatching(String target){
  try {
    QueryParser parser=new QueryParser(LuceneParameters.VERSION,FoodDocumentTranslator.FOOD_NAME,analyzer);
    Query query=parser.parse(target);
    return executeQuery(query);
  }
 catch (  ParseException e) {
    Log.e(TAG,"error",e);
    return new ArrayList<Food>();
  }
}
 

Example 5

From project greplin-lucene-utils, under directory /src/main/java/com/greplin/lucene/query/.

Source file: PredicateQuery.java


/** 
 * Rewrites the wrapped query.
 * @param reader the reader to rewrite for.
 * @return the rewritten query.
 * @throws IOException if IO issues occur.
 */
@Override public Query rewrite(final IndexReader reader) throws IOException {
  Query rewritten=this.query.rewrite(reader);
  if (rewritten != this.query) {
    return new PredicateQuery(rewritten,this.predicate);
  }
 else {
    return this;
  }
}
 

Example 6

From project hibernateuniversity-devoxx, under directory /demo/querydsl/src/main/java/org/jboss/hibernateUniversity/criteria/action/.

Source file: BookManager.java


/** 
 * Type-safe range query: building the query object and converting it to a string.
 */
public void getAllBooksFrom1977(){
  FullTextEntityManager em=lazyEM.get();
  final QueryBuilder builder=em.getSearchFactory().buildQueryBuilder().forEntity(Book.class).get();
  Calendar pubDate=Calendar.getInstance();
  pubDate.set(1977,0,01);
  Date begin=new Date(pubDate.getTimeInMillis());
  pubDate.set(1977,11,31);
  Date end=new Date(pubDate.getTimeInMillis());
  final Query luceneQuery=builder.range().onField("publicationDate").from(begin).to(end).createQuery();
  System.out.println(luceneQuery.toString());
  final FullTextQuery query=em.createFullTextQuery(luceneQuery,Book.class);
  final List<Book> resultList=query.setFirstResult(0).setMaxResults(5).getResultList();
  displayListOfBooks(resultList,query);
}
 

Example 7

From project jsword, under directory /src/test/java/org/crosswire/jsword/index/lucene/analysis/.

Source file: AnalyzerFactoryTest.java


public void testCustomStopWordFiltering() throws ParseException {
  AbstractBookAnalyzer myAnalyzer=new EnglishLuceneAnalyzer();
  QueryParser parser=new QueryParser(Version.LUCENE_29,field,myAnalyzer);
  myAnalyzer.setDoStopWords(true);
  String[] stopWords={"thy","ye","unto","shalt"};
  myAnalyzer.setStopWords(new CharArraySet(Arrays.asList(stopWords),false));
  String testInput="Upon thy belly Shalt thou go";
  Query query=parser.parse(testInput);
  assertTrue(query.toString().indexOf(field + ":shalt") == -1);
  assertTrue(query.toString().indexOf(field + ":thy") == -1);
  assertTrue(query.toString().indexOf(field + ":upon") > -1);
}
 

Example 8

From project Kairos, under directory /src/java/org/apache/nutch/tools/.

Source file: PruneIndexTool.java


/** 
 * Read a list of Lucene queries from the stream (UTF-8 encoding is assumed). There should be a single Lucene query per line. Blank lines and comments starting with '#' are allowed. <p>NOTE: you may wish to use the {@link org.apache.nutch.searcher.Query#main(String[])} method to translate queries from Nutch format to Lucene format.</p>
 * @param is InputStream to read from
 * @return array of Lucene queries
 * @throws Exception
 */
public static Query[] parseQueries(InputStream is) throws Exception {
  BufferedReader br=new BufferedReader(new InputStreamReader(is,"UTF-8"));
  String line=null;
  QueryParser qp=new QueryParser("url",new WhitespaceAnalyzer());
  Vector<Query> queries=new Vector<Query>();
  while ((line=br.readLine()) != null) {
    line=line.trim();
    if (line.length() == 0 || line.charAt(0) == '#')     continue;
    Query q=qp.parse(line);
    queries.add(q);
  }
  return queries.toArray(new Query[0]);
}
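A hypothetical call to the parseQueries method above, assuming PruneIndexTool is on the classpath and the usual java.io imports; the field names in the query strings are made up.

static void parseQueriesExample() throws Exception {
  // Two queries, one comment line and one blank line, supplied as an in-memory UTF-8 stream.
  InputStream is = new ByteArrayInputStream(
      "url:http*\n# this line is ignored\n\ncontent:lucene\n".getBytes("UTF-8"));
  Query[] queries = PruneIndexTool.parseQueries(is);
  for (Query q : queries) {
    System.out.println(q);  // prints the two parsed queries; the comment and blank line were skipped
  }
}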
 

Example 9

From project le11-nls, under directory /src/test/java/com/github/le11/nls/solr/.

Source file: SolrNLSQParserPluginTest.java


@Test public void testSimple(){
  try {
    SolrNLSQParserPlugin solrNLSQParserPlugin=new SolrNLSQParserPlugin();
    LocalSolrQueryRequest request=testHarness.getRequestFactory("standard",0,10).makeRequest("q","\"people working at Google Amsterdam office\"","debugQuery","true");
    QParser nlsQParser=solrNLSQParserPlugin.createParser("people working at Google Amsterdam office",new MapSolrParams(new HashMap<String,String>()),new MapSolrParams(new HashMap<String,String>()),request);
    Query q=nlsQParser.parse();
    assertNotNull(q);
    System.out.println(q.toString());
  }
 catch (  Exception e) {
    e.printStackTrace();
    fail(e.getLocalizedMessage());
  }
}
 

Example 10

From project montysolr, under directory /contrib/adsabs/src/java/org/apache/lucene/queryParser/aqp/builders/.

Source file: AqpAdslabsSubSueryProvider.java


public Query parse(FunctionQParser fp) throws ParseException {
  Query innerQuery=fp.parseNestedQuery();
  SolrQueryRequest req=fp.getReq();
  String refField="reference";
  String idField="bibcode";
  return new SecondOrderQuery(innerQuery,null,new SecondOrderCollectorCitedBy(idField,refField),false);
}
 

Example 11

From project agile, under directory /agile-apps/agile-app-search/src/main/java/org/headsupdev/agile/app/search/feed/.

Source file: SearchFeed.java


protected void populateFeed(Element root){
  String query=parameters.getString("query");
  List<Object[]> results=new LinkedList<Object[]>();
  Session session=((HibernateStorage)Manager.getStorageInstance()).getHibernateSession();
  FullTextSession fullTextSession=org.hibernate.search.Search.createFullTextSession(((SessionProxy)session).getRealSession());
  MultiFieldQueryParser parser=new MultiFieldQueryParser(new ArrayList<String>(HibernateUtil.getSearchFields()).toArray(new String[0]),new StandardAnalyzer());
  try {
    Query q=parser.parse(query);
    FullTextQuery textQuery=fullTextSession.createFullTextQuery(q);
    textQuery.setProjection(FullTextQuery.SCORE,FullTextQuery.THIS);
    textQuery.setMaxResults(25);
    results=textQuery.list();
  }
 catch (  Exception e) {
    Manager.getLogger(getClass().getName()).error("Failed to run search",e);
  }
  for (  Object[] o : results) {
    Element node=new Element("result");
    int relevance=(int)(((Float)o[0]) * 100);
    String title=o[1].toString();
    String link="";
    if (o[1] instanceof SearchResult) {
      link=((SearchResult)o[1]).getLink();
    }
    node.addContent(new Element("title").addContent(title));
    node.addContent(new Element("relevance").addContent(relevance + "%"));
    node.addContent(new Element("link").addContent(storage.getGlobalConfiguration().getFullUrl(link)));
    String image=Search.getClassImageName(o[1]);
    node.addContent(new Element("icon").addContent(storage.getGlobalConfiguration().getFullUrl("resources/org.headsupdev.agile.HeadsUpResourceMarker/" + image)));
    root.addContent(node);
  }
}
 

Example 12

From project ATHENA, under directory /core/apa/src/main/java/org/fracturedatlas/athena/apa/indexing/.

Source file: IndexingApaAdapter.java


public Set<Object> searchIndex(AthenaSearch search){
  Set<Object> ids=new HashSet<Object>();
  if (indexingDisabled) {
    return ids;
  }
  String query=search.getQuery();
  query=query + " AND _type:" + search.getType();
  logger.debug("{}",query);
  Integer start=0;
  if (search.getStart() != null) {
    start=search.getStart();
  }
  Integer limit=DEFAULT_PAGE_SIZE;
  if (search.getLimit() != null) {
    limit=search.getLimit();
  }
  Integer numResults=start + limit;
  try {
    QueryParser queryParser=new QueryParser(Version.LUCENE_32,DOC_TEXT,analyzer);
    queryParser.setAllowLeadingWildcard(true);
    Query q=queryParser.parse(query);
    IndexReader reader=IndexReader.open(getWriter(),false);
    IndexSearcher indexSearcher=new IndexSearcher(reader);
    TopDocs topDocs=indexSearcher.search(q,numResults);
    ScoreDoc[] hits=topDocs.scoreDocs;
    for (int i=start; i < hits.length; ++i) {
      int docId=hits[i].doc;
      Document d=indexSearcher.doc(docId);
      ids.add(d.get("_id"));
    }
    reader.close();
    indexSearcher.close();
    return ids;
  }
 catch (  Exception e) {
    e.printStackTrace();
    return null;
  }
}
 

Example 13

From project book, under directory /src/main/java/com/tamingtext/classifier/mlt/.

Source file: MoreLikeThisCategorizer.java


public CategoryHits[] categorize(Reader reader) throws IOException {
  Query query=moreLikeThis.like(reader);
  HashMap<String,CategoryHits> categoryHash=new HashMap<String,CategoryHits>(25);
  for (  ScoreDoc sd : indexSearcher.search(query,maxResults).scoreDocs) {
    String cat=getDocClass(sd.doc);
    if (cat == null)     continue;
    CategoryHits ch=categoryHash.get(cat);
    if (ch == null) {
      ch=new CategoryHits();
      ch.setLabel(cat);
      categoryHash.put(cat,ch);
    }
    ch.incrementScore(sd.score);
  }
  SortedSet<CategoryHits> sortedCats=new TreeSet<CategoryHits>(CategoryHits.byScoreComparator());
  sortedCats.addAll(categoryHash.values());
  return sortedCats.toArray(new CategoryHits[0]);
}
 

Example 14

From project EARQ, under directory /src/main/java/org/openjena/earq/searchers/.

Source file: LuceneIndexSearcher.java


@Override public Iterator<Document> search(String query){
  Searcher indexSearcher=new org.apache.lucene.search.IndexSearcher(indexReader);
  Query luceneQuery=null;
  ArrayList<Document> hits=new ArrayList<Document>();
  try {
    luceneQuery=queryParser.parse(query);
    TopDocs docs=indexSearcher.search(luceneQuery,LuceneConstants.NUM_RESULTS);
    for (int i=0; i < docs.scoreDocs.length; i++) {
      org.apache.lucene.document.Document luceneDocument=indexSearcher.doc(docs.scoreDocs[i].doc);
      Document doc=new Document();
      List<Fieldable> fields=luceneDocument.getFields();
      for (      Fieldable field : fields) {
        doc.set(field.name(),field.stringValue());
      }
      doc.set(EARQ.fScore,String.valueOf(docs.scoreDocs[i].score));
      hits.add(doc);
    }
  }
 catch (  Exception e) {
    throw new EARQException(e.getMessage(),e);
  }
 finally {
    try {
      indexSearcher.close();
    }
 catch (    IOException e) {
    }
  }
  return hits.iterator();
}
 

Example 15

From project elephant-twin, under directory /com.twitter.elephanttwin.lucene/src/main/java/com/twitter/elephanttwin/lucene/retrieval/.

Source file: HDFSQueryEngine.java


public TopDocs query(String q,int numHits){
  MorePreconditions.checkNotBlank(q);
  Query query;
  try {
    query=parser.parse(q);
  }
 catch (  ParseException e) {
    throw new IllegalArgumentException("Invalid query: " + q,e);
  }
  try {
    return searcher.search(query,numHits);
  }
 catch (  IOException e) {
    throw new RuntimeException("Error searching the index!",e);
  }
}
 

Example 16

From project entando-core-engine, under directory /src/main/java/com/agiletec/plugins/jacms/aps/system/services/searchengine/.

Source file: SearcherDAO.java


/** 
 * Searches for a list of content identifiers based on the current language code and the supplied word.
 * @param langCode The current language code.
 * @param word The word to search for. A search string such as "Venice Amsterdam" is treated as if it were "Venice OR Amsterdam".
 * @param allowedGroups The groups authorized to view the content. If the collection is null or empty, the search only covers content associated with the "free access" group. If the collection contains the "Administrators" group, the search returns content identifiers without any group filtering.
 * @return The list of content identifiers.
 * @throws ApsSystemException
 */
public List<String> searchContentsId(String langCode,String word,Collection<String> allowedGroups) throws ApsSystemException {
  List<String> contentsId=new ArrayList<String>();
  IndexSearcher searcher=null;
  try {
    searcher=this.getSearcher();
    QueryParser parser=new QueryParser(Version.LUCENE_30,langCode,this.getAnalyzer());
    String queryString=this.createQueryString(langCode,word,allowedGroups);
    Query query=parser.parse(queryString);
    int maxSearchLength=1000;
    TopDocs topDocs=searcher.search(query,null,maxSearchLength);
    ScoreDoc[] scoreDoc=topDocs.scoreDocs;
    if (scoreDoc.length > 0) {
      for (int index=0; index < scoreDoc.length; index++) {
        ScoreDoc sDoc=scoreDoc[index];
        Document doc=searcher.doc(sDoc.doc);
        contentsId.add(doc.get(IIndexerDAO.CONTENT_ID_FIELD_NAME));
      }
    }
  }
 catch (  IOException e) {
    throw new ApsSystemException("Errore in estrazione " + "documento in base ad indice",e);
  }
catch (  ParseException e) {
    throw new ApsSystemException("Errore parsing nella ricerca",e);
  }
 finally {
    this.releaseSearcher(searcher);
  }
  return contentsId;
}
 

Example 17

From project grails-searchable, under directory /src/java/grails/plugin/searchable/internal/lucene/.

Source file: LuceneUtils.java


/** 
 * Returns a list of terms by parsing the given query string; special query characters and operator words (OR/AND) are not included in the returned list.
 * @param queryString the query string to parse
 * @param analyzer the Analyzer instance, may be null in which case Lucene's StandardAnalyzer is used
 * @return a list of text terms
 * @throws org.apache.lucene.queryParser.ParseException if the query is invalid
 */
public static String[] termsForQueryString(String queryString,Analyzer analyzer) throws ParseException {
  if (analyzer == null) {
    analyzer=new StandardAnalyzer();
  }
  final String defaultField="$termsForQueryString_defaultField$";
  QueryParser queryParser=new QueryParser(defaultField,analyzer);
  Query query=queryParser.parse(queryString);
  Set terms=new ListNotSet();
  query.extractTerms(terms);
  String[] termsArray=new String[terms.size()];
  int i=0;
  for (Iterator iter=terms.iterator(); iter.hasNext(); ) {
    termsArray[i++]=((Term)iter.next()).text();
  }
  return termsArray;
}
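A hypothetical call to termsForQueryString above; passing null for the analyzer falls back to StandardAnalyzer, and neither the AND operator nor the field prefixes appear in the result.

static void termsExample() throws ParseException {
  String[] terms = LuceneUtils.termsForQueryString("title:lucene AND body:search", null);
  for (String term : terms) {
    System.out.println(term);  // expected: the analyzed terms "lucene" and "search"
  }
}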
 

Example 18

From project guj.com.br, under directory /src/net/jforum/search/.

Source file: LuceneSearch.java


private SearchResult performSearch(SearchArgs args,LuceneResultCollector resultCollector,Filter filter){
  SearchResult result;
  try {
    StringBuffer criteria=new StringBuffer(256);
    this.filterByForum(args,criteria);
    this.filterByKeywords(args,criteria);
    this.filterByDateRange(args,criteria);
    Query query=new QueryParser("",new StandardAnalyzer()).parse(criteria.toString());
    if (logger.isDebugEnabled()) {
      logger.debug("Generated query: " + query);
    }
    Hits hits=filter == null ? this.search.search(query,this.getSorter(args)) : this.search.search(query,filter,this.getSorter(args));
    if (hits != null && hits.length() > 0) {
      result=new SearchResult(resultCollector.collect(args,hits,query),hits.length());
    }
 else {
      result=new SearchResult(new ArrayList(),0);
    }
  }
 catch (  Exception e) {
    throw new SearchException(e);
  }
  return result;
}
 

Example 19

From project hibernate-ogm, under directory /hibernate-ogm-core/src/test/java/org/hibernate/ogm/test/hsearch/.

Source file: HibernateSearchAtopOgmTest.java


@Test public void testHibernateSearchJPAAPIUsage() throws Exception {
  getTransactionManager().begin();
  final FullTextEntityManager ftem=Search.getFullTextEntityManager(getFactory().createEntityManager());
  final Insurance insurance=new Insurance();
  insurance.setName("Macif");
  ftem.persist(insurance);
  getTransactionManager().commit();
  ftem.clear();
  getTransactionManager().begin();
  final QueryBuilder b=ftem.getSearchFactory().buildQueryBuilder().forEntity(Insurance.class).get();
  final Query lq=b.keyword().onField("name").matching("Macif").createQuery();
  final FullTextQuery ftQuery=ftem.createFullTextQuery(lq,Insurance.class);
  ftQuery.initializeObjectsWith(ObjectLookupMethod.SKIP,DatabaseRetrievalMethod.FIND_BY_ID);
  final List<Insurance> resultList=ftQuery.getResultList();
  assertThat(getFactory().getPersistenceUnitUtil().isLoaded(resultList.get(0))).isTrue();
  assertThat(resultList).hasSize(1);
  for (  Object e : resultList) {
    ftem.remove(e);
  }
  getTransactionManager().commit();
  ftem.close();
}
 

Example 20

From project jAPS2, under directory /src/com/agiletec/plugins/jacms/aps/system/services/searchengine/.

Source file: SearcherDAO.java


/** 
 * Searches for a list of content identifiers based on the current language code and the supplied word.
 * @param langCode The current language code.
 * @param word The word to search for. A search string such as "Venice Amsterdam" is treated as if it were "Venice OR Amsterdam".
 * @param allowedGroups The groups authorized to view the content. If the collection is null or empty, the search only covers content associated with the "free access" group. If the collection contains the "Administrators" group, the search returns content identifiers without any group filtering.
 * @return The list of content identifiers.
 * @throws ApsSystemException
 */
public List<String> searchContentsId(String langCode,String word,Collection<String> allowedGroups) throws ApsSystemException {
  List<String> contentsId=new ArrayList<String>();
  IndexSearcher searcher=null;
  try {
    searcher=this.getSearcher();
    QueryParser parser=new QueryParser(Version.LUCENE_30,langCode,this.getAnalyzer());
    String queryString=this.createQueryString(langCode,word,allowedGroups);
    Query query=parser.parse(queryString);
    int maxSearchLength=1000;
    TopDocs topDocs=searcher.search(query,null,maxSearchLength);
    ScoreDoc[] scoreDoc=topDocs.scoreDocs;
    if (scoreDoc.length > 0) {
      for (int index=0; index < scoreDoc.length; index++) {
        ScoreDoc sDoc=scoreDoc[index];
        Document doc=searcher.doc(sDoc.doc);
        contentsId.add(doc.get(IIndexerDAO.CONTENT_ID_FIELD_NAME));
      }
    }
  }
 catch (  IOException e) {
    throw new ApsSystemException("Errore in estrazione " + "documento in base ad indice",e);
  }
catch (  ParseException e) {
    throw new ApsSystemException("Errore parsing nella ricerca",e);
  }
 finally {
    this.releaseSearcher(searcher);
  }
  return contentsId;
}
 

Example 21

From project java-maven-tests, under directory /src/lucene-benchmark/src/test/java/eg/sample/lb/.

Source file: BasicLuceneTest.java


@Test public void testLuceneUsage() throws IOException, ParseException {
  final Analyzer analyzer=new StandardAnalyzer(LUCENE_VERSION);
  final Directory directory=new RAMDirectory();
  final IndexWriterConfig writerConfig=new IndexWriterConfig(LUCENE_VERSION,analyzer);
  writerConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
  IndexWriter indexWriter=new IndexWriter(directory,writerConfig);
  final Document luceneDocument=new Document();
  final String text="This is the text to be indexed.";
  luceneDocument.add(new Field("fieldname",text,Field.Store.YES,Field.Index.ANALYZED));
  indexWriter.addDocument(luceneDocument);
  indexWriter.optimize();
  indexWriter.close();
  IndexSearcher indexSearcher=new IndexSearcher(directory);
  QueryParser parser=new QueryParser(LUCENE_VERSION,"fieldname",analyzer);
  Query query=parser.parse("text");
  final TopDocs hits=indexSearcher.search(query,10);
  assertEquals(1,hits.totalHits);
  for (int i=0; i < hits.totalHits; i++) {
    final ScoreDoc scoreDoc=hits.scoreDocs[i];
    final Document hitDoc=indexSearcher.doc(scoreDoc.doc);
    assertEquals("This is the text to be indexed.",hitDoc.get("fieldname"));
  }
  indexSearcher.close();
  directory.close();
}
 

Example 22

From project jforum2, under directory /src/net/jforum/search/.

Source file: LuceneSearch.java


private SearchResult performSearch(SearchArgs args,LuceneResultCollector resultCollector,Filter filter){
  SearchResult result;
  try {
    StringBuffer criteria=new StringBuffer(256);
    this.filterByForum(args,criteria);
    this.filterByKeywords(args,criteria);
    this.filterByDateRange(args,criteria);
    Query query=new QueryParser("",new StandardAnalyzer()).parse(criteria.toString());
    if (logger.isDebugEnabled()) {
      logger.debug("Generated query: " + query);
    }
    Hits hits=filter == null ? this.search.search(query,this.getSorter(args)) : this.search.search(query,filter,this.getSorter(args));
    if (hits != null && hits.length() > 0) {
      result=new SearchResult(resultCollector.collect(args,hits,query),hits.length());
    }
 else {
      result=new SearchResult(new ArrayList(),0);
    }
  }
 catch (  Exception e) {
    throw new SearchException(e);
  }
  return result;
}
 

Example 23

From project jspwiki, under directory /src/org/apache/wiki/search/.

Source file: LuceneSearchProvider.java


/** 
 * {@inheritDoc}
 */
public void pageRemoved(WikiPage page){
  IndexWriter writer=null;
  try {
    Directory luceneDir=new SimpleFSDirectory(new File(m_luceneDirectory),null);
    writer=getIndexWriter(luceneDir);
    Query query=new TermQuery(new Term(LUCENE_ID,page.getName()));
    writer.deleteDocuments(query);
  }
 catch (  Exception e) {
    log.error("Unable to remove page '" + page.getName() + "' from Lucene index",e);
  }
 finally {
    close(writer);
  }
}
 

Example 24

From project maven-dependency-analyzer, under directory /src/main/java/nl/pieni/maven/dependency_analyzer/repository/remote/.

Source file: RemoteRepositorySearcher.java


/** 
 * Create the query to perform
 * @param groupPatterns list of patterns to search for
 * @param packaging the packaging of the artifact searched for
 * @return {@link BooleanQuery}
 */
private BooleanQuery createQuery(final List<String> groupPatterns,final List<String> packaging){
  BooleanQuery bq=new BooleanQuery();
  Query query;
  Field field=MAVEN.GROUP_ID;
  BooleanQuery groupQuery=new BooleanQuery();
  for (  String pattern : groupPatterns) {
    if (pattern.endsWith("*")) {
      query=indexer.constructQuery(field,pattern,SearchType.EXACT);
    }
 else {
      query=indexer.constructQuery(field,pattern + "*",SearchType.EXACT);
    }
    groupQuery.add(query,BooleanClause.Occur.SHOULD);
  }
  bq.add(groupQuery,BooleanClause.Occur.MUST);
  BooleanQuery packagingQuery=new BooleanQuery();
  for (  String pack : packaging) {
    Query q=indexer.constructQuery(MAVEN.PACKAGING,pack,SearchType.EXACT);
    packagingQuery.add(q,BooleanClause.Occur.SHOULD);
  }
  bq.add(packagingQuery,BooleanClause.Occur.MUST);
  Query queryClassifierSources=indexer.constructQuery(MAVEN.CLASSIFIER,"sources",SearchType.EXACT);
  Query queryClassifierJavaDoc=indexer.constructQuery(MAVEN.CLASSIFIER,"javadoc",SearchType.EXACT);
  bq.add(new BooleanClause(queryClassifierJavaDoc,BooleanClause.Occur.MUST_NOT));
  bq.add(new BooleanClause(queryClassifierSources,BooleanClause.Occur.MUST_NOT));
  getLog().info("Created query: " + bq);
  return bq;
}
 

Example 25

From project mdk, under directory /service/lucene/src/main/java/uk/ac/ebi/mdk/service/query/.

Source file: AbstractLuceneService.java


/** 
 * Construct a query without using the QueryParser. This is useful when you want to search a field that is analyzed and preserve spaces. The token stream is converted into a boolean 'Must Occur' query. For most simple queries this method can be used. The approximate flag allows construction of approximate {@see FuzzyMatch} queries for each token. The similarity for the fuzzy match can be set via the {@see setMinSimilarity(float)} method.
 * @param text        text to construct the query for
 * @param term        the field to search the text in
 * @param approximate whether to use approximate search
 * @return searchable query
 */
public Query construct(String text,Term term,boolean approximate){
  StringReader reader=new StringReader(text);
  TokenStream stream=analyzer.tokenStream(term.field(),reader);
  BooleanQuery query=new BooleanQuery();
  CharTermAttribute termAttribute=stream.getAttribute(CharTermAttribute.class);
  try {
    while (stream.incrementToken()) {
      Term termToken=term.createTerm(termAttribute.toString());
      Query subQuery=approximate ? new FuzzyQuery(termToken,getMinSimilarity()) : new TermQuery(termToken);
      query.add(subQuery,BooleanClause.Occur.MUST);
    }
  }
 catch (  IOException ex) {
    LOGGER.error("Could not constructing query ",ex);
  }
  return query;
}
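A hypothetical call, assuming service is a concrete subclass of AbstractLuceneService with its analyzer configured: each analyzed token becomes a MUST clause, and with the approximate flag set each clause is a FuzzyQuery rather than a TermQuery.

Term nameTerm = new Term("name");                                         // hypothetical field
Query exact = service.construct("glucose 6 phosphate", nameTerm, false);  // TermQuery per token
Query fuzzy = service.construct("glucose 6 phosphate", nameTerm, true);   // FuzzyQuery per token
System.out.println(exact);
System.out.println(fuzzy);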
 

Example 26

From project AdServing, under directory /modules/db/src/main/java/net/mad/ads/db/utils/.

Source file: QueryHelper.java


public Query getConditionalQuery(AdRequest request,AdDB addb){
  if (!request.hasConditions()) {
    return null;
  }
  BooleanQuery query=new BooleanQuery();
  for (  Condition condition : addb.manager.getConditions()) {
    condition.addQuery(request,query);
  }
  if (query.getClauses() == null || query.getClauses().length == 0) {
    return null;
  }
  return query;
}
 

Example 27

From project couchdb-lucene, under directory /src/main/java/com/github/rnewson/couchdb/lucene/.

Source file: CustomQueryParser.java


@Override protected Query getFieldQuery(final String field,final String queryText,final boolean quoted) throws ParseException {
  final TypedField typedField=new TypedField(field);
  if (typedField.getType() == FieldType.STRING) {
    return super.getFieldQuery(field,queryText,quoted);
  }
  return typedField.toTermQuery(queryText);
}
 

Example 28

From project HBasePS, under directory /src/main/java/ch/sentric/hbase/coprocessor/.

Source file: ProspectiveSearchRegionObserver.java


@Override public void postPut(ObserverContext<RegionCoprocessorEnvironment> e,Put put,WALEdit edit,boolean writeToWAL) throws IOException {
  LOG.debug("inside postPut hook");
  if (Bytes.compareTo(ArticleTable.NAME,e.getEnvironment().getRegion().getTableDesc().getName()) == 0) {
    LOG.debug("Load agents...");
    this.queries=this.queryDao.getQueries();
    try {
      final Map<String,Query> parsedQueries=this.parseQueries(queries);
      for (      Map.Entry<String,Query> entry : parsedQueries.entrySet()) {
        LOG.debug(entry.getKey() + " -> " + entry.getValue().toString());
      }
      final Document doc=buildDocument(put);
      Response<String> result=this.percolator.percolate(doc,parsedQueries);
      if (result != null && result.hasMatch()) {
        HTable tbl=rm.getTable(ReportTable.NAME);
        for (        Map.Entry<String,Query> entry : result.getMatches().entrySet()) {
          LOG.debug("Matched: " + entry.getKey() + " -> "+ entry.getValue());
          tbl.put(preparePut(entry.getKey() + "/" + Long.toString(put.getTimeStamp()),put.getRow()));
        }
        tbl.close();
      }
 else {
        LOG.debug("No query matched the given document");
      }
    }
 catch (    ParseException ex) {
      LOG.error("Error parsing queries",ex);
    }
  }
}
 

Example 29

From project jackrabbit-oak, under directory /oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/.

Source file: LuceneIndex.java


private static void addReferenceConstraint(String uuid,List<Query> qs,IndexReader reader){
  if (reader == null) {
    qs.add(new TermQuery(new Term("*",uuid)));
    return;
  }
  BooleanQuery bq=new BooleanQuery();
  Collection<String> fields=MultiFields.getIndexedFields(reader);
  for (  String f : fields) {
    bq.add(new TermQuery(new Term(f,uuid)),Occur.SHOULD);
  }
  qs.add(bq);
}
 

Example 30

From project james-mailbox, under directory /lucene/src/main/java/org/apache/james/mailbox/lucene/search/.

Source file: LuceneMessageSearchIndex.java


/** 
 * Return a {@link Query} which is built based on the given {@link SearchQuery.InternalDateCriterion}.
 * @param crit
 * @return query
 * @throws UnsupportedSearchException
 */
private Query createInternalDateQuery(SearchQuery.InternalDateCriterion crit) throws UnsupportedSearchException {
  DateOperator dop=crit.getOperator();
  DateResolution res=dop.getDateResultion();
  String field=toInteralDateField(res);
  return createQuery(field,dop);
}
 

Example 31

From project lenya, under directory /org.apache.lenya.module.lucene/src/main/java/org/apache/cocoon/components/search/components/impl/.

Source file: AbstractSearcher.java


public Hits search(Query query) throws ProcessingException {
  try {
    getLuceneSearcher();
    if (sortfield == null) {
      return luceneSearcher.search(query);
    }
 else {
      return luceneSearcher.search(query,new Sort(sortfield));
    }
  }
 catch (  IOException e) {
    throw new ProcessingException(e);
  }
}
 

Example 32

From project lucene-interval-fields, under directory /src/test/java/com/greplin/interval/.

Source file: BaseIntervalQueryTest.java


protected void assertSearch(IndexSearcher searcher,Query query,Integer... expectedResults) throws IOException {
  Set<Integer> expected=ImmutableSet.copyOf(expectedResults);
  TopDocs docs=searcher.search(query,100);
  Set<Integer> actual=Sets.newHashSet();
  for (  ScoreDoc scoreDoc : docs.scoreDocs) {
    Document doc=searcher.doc(scoreDoc.doc);
    actual.add(Integer.valueOf(doc.get("id")));
  }
  Assert.assertEquals(query + " should match [" + Joiner.on(", ").join(expectedResults)+ "]",expected,actual);
}