java/code/src/com/redhat/rhn/frontend/action/errata/ErrataSearchAction.java | 4
search-server/spacewalk-search/buildconf/exclude | 2
search-server/spacewalk-search/scripts/src/java/com/redhat/satellite/search/index/docs/WebCrawl.java | 304 ----------
search-server/spacewalk-search/src/config/com/redhat/satellite/search/db/errata_handler.xml | 2
search-server/spacewalk-search/src/java/com/redhat/satellite/search/scheduler/tasks/IndexDocumentsTask.java | 140 ----
5 files changed, 2 insertions(+), 450 deletions(-)
New commits:
commit 7cacc612569df36d2af6826ce2889755edcfc7b3
Author: Justin Sherrill <jsherril(a)redhat.com>
Date: Fri May 14 11:47:01 2010 -0400
576953 - fixing errata search case sensitivity and not searching on partial cve name
diff --git a/java/code/src/com/redhat/rhn/frontend/action/errata/ErrataSearchAction.java b/java/code/src/com/redhat/rhn/frontend/action/errata/ErrataSearchAction.java
index f412422..0c2cced 100644
--- a/java/code/src/com/redhat/rhn/frontend/action/errata/ErrataSearchAction.java
+++ b/java/code/src/com/redhat/rhn/frontend/action/errata/ErrataSearchAction.java
@@ -574,9 +574,7 @@ public class ErrataSearchAction extends RhnAction {
return "(name:(" + query + ") filename:(" + query +
"))";
}
else if (OPT_CVE.equals(mode)) {
- if (query.trim().toLowerCase().indexOf("cve-") == -1) {
- query = "CVE-" + query;
- }
+ query = "%" + query.toLowerCase() + "%";
return "listErrataByCVE:(" + query + ")";
}
diff --git a/search-server/spacewalk-search/src/config/com/redhat/satellite/search/db/errata_handler.xml b/search-server/spacewalk-search/src/config/com/redhat/satellite/search/db/errata_handler.xml
index 7c6cece..6dc5f19 100644
--- a/search-server/spacewalk-search/src/config/com/redhat/satellite/search/db/errata_handler.xml
+++ b/search-server/spacewalk-search/src/config/com/redhat/satellite/search/db/errata_handler.xml
@@ -23,7 +23,7 @@
ac.org_id = wc.org_id and
ps.web_user_id = wc.id and
ps.id = #sessionId# and
- cve.name = #param0#
+ LOWER(cve.name) like #param0#
</select>
<select id="listErrataByIssueDateRange"
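
Taken together, the two hunks above replace exact, case-sensitive CVE matching
with case-insensitive substring matching: the Java side now lowercases the
user's query and wraps it in SQL "%" wildcards instead of forcing a "CVE-"
prefix, and the iBATIS mapping compares it against LOWER(cve.name) with LIKE.
A minimal, self-contained sketch of the resulting behaviour (the class and
helper names here are hypothetical; the real logic lives in ErrataSearchAction
and errata_handler.xml):

public class CveQuerySketch {

    // Mirrors the new ErrataSearchAction logic: lowercase, then add wildcards.
    static String toCveParam(String query) {
        return "%" + query.toLowerCase() + "%";
    }

    // Mirrors LOWER(cve.name) LIKE #param0# for a simple %...% pattern.
    static boolean likeMatches(String cveName, String param) {
        String needle = param.substring(1, param.length() - 1); // strip the % wildcards
        return cveName.toLowerCase().contains(needle);
    }

    public static void main(String[] args) {
        String stored = "CVE-2010-0552";
        // Each of these now matches. Before the fix, "0552" was rewritten to
        // "CVE-0552" and compared with equality, so partial names never hit.
        for (String q : new String[] {"cve-2010-0552", "2010-0552", "0552"}) {
            System.out.println(q + " -> " + likeMatches(stored, toCveParam(q)));
        }
    }
}
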
commit 7fbdf029cf8666a5033032755da3421af4c69f5a
Author: Justin Sherrill <jsherril(a)redhat.com>
Date: Thu May 13 15:03:17 2010 -0400
removing some dead code from the search server
diff --git a/search-server/spacewalk-search/buildconf/exclude b/search-server/spacewalk-search/buildconf/exclude
index 7873251..e69de29 100644
--- a/search-server/spacewalk-search/buildconf/exclude
+++ b/search-server/spacewalk-search/buildconf/exclude
@@ -1,2 +0,0 @@
-**/WebCrawl.java
-**/IndexDocumentsTask.java
diff --git a/search-server/spacewalk-search/scripts/src/java/com/redhat/satellite/search/index/docs/WebCrawl.java b/search-server/spacewalk-search/scripts/src/java/com/redhat/satellite/search/index/docs/WebCrawl.java
deleted file mode 100644
index d656813..0000000
--- a/search-server/spacewalk-search/scripts/src/java/com/redhat/satellite/search/index/docs/WebCrawl.java
+++ /dev/null
@@ -1,304 +0,0 @@
-/**
- * Copyright (c) 2008--2010 Red Hat, Inc.
- *
- * This software is licensed to you under the GNU General Public License,
- * version 2 (GPLv2). There is NO WARRANTY for this software, express or
- * implied, including the implied warranties of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
- * along with this software; if not, see
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * Red Hat trademarks are not licensed under GPLv2. No permission is
- * granted to use or replicate Red Hat trademarks that are incorporated
- * in this software or its documentation.
- */
-package com.redhat.satellite.search.index.docs;
-
-import org.apache.nutch.crawl.Injector;
-import org.apache.nutch.crawl.Generator;
-import org.apache.nutch.crawl.CrawlDb;
-import org.apache.nutch.crawl.LinkDb;
-import org.apache.nutch.fetcher.Fetcher;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.nutch.parse.ParseSegment;
-import org.apache.nutch.indexer.DeleteDuplicates;
-import org.apache.nutch.indexer.IndexMerger;
-import org.apache.nutch.indexer.Indexer;
-import org.apache.nutch.util.HadoopFSUtil;
-import org.apache.nutch.util.NutchConfiguration;
-import org.apache.nutch.util.NutchJob;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.PosixParser;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Options;
-
-import org.apache.commons.cli.ParseException;
-
-import org.apache.log4j.Logger;
-
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.io.IOException;
-
-
-/**
-Crawls a set of urls and indexes their content.
-Based off of nutch's crawl
- *
- * @version $Rev: $
- */
-public class WebCrawl {
- private static Logger log = Logger.getLogger(WebCrawl.class);
-
- private Configuration conf;
- private JobConf job;
- private String inputUrlFile;
- private String tmpCrawlDir;
- private String outputIndexDir;
-
- private int depth;
- private int threads;
- private long topN;
-
- /**
- * Constructor
- */
- public WebCrawl() {
- conf = NutchConfiguration.create();
- conf.addResource("crawl-tool.xml");
- job = new NutchJob(conf);
- threads = job.getInt("fetcher.threads.fetch", 10);
- depth = 5;
- topN = Long.MAX_VALUE;
- }
-
- /**
- * @return Returns the dir used to store tmp crawl data
- */
- public String getTmpCrawlDir() {
- return tmpCrawlDir;
- }
- /**
- * @param tmpCrawlDirIn the dir used to store tmp crawl data
- */
- public void setTmpCrawlDir(String tmpCrawlDirIn) {
- tmpCrawlDir = tmpCrawlDirIn;
- }
- /**
- * @return Returns the file used to seed the url crawl
- */
- public String getInputUrlFile() {
- return inputUrlFile;
- }
- /**
- * @param inputUrlFileIn file used to seed the url crawl
- */
- public void setInputUrlFile(String inputUrlFileIn) {
- inputUrlFile = inputUrlFileIn;
- }
- /**
- * @return Returns the dir to store the index of the crawled docs
- */
- public String getOutputIndexDir() {
- return outputIndexDir;
- }
- /**
- * @param outputIndexDirIn the dir to store the index of the crawled docs
- */
- public void setOutputIndexDir(String outputIndexDirIn) {
- outputIndexDir = outputIndexDirIn;
- }
- /**
- * @return Returns the maximum depth of recursion allowed
- */
- public int getDepth() {
- return depth;
- }
- /**
- * @param depthIn the maximum depth of recursion allowed
- */
- public void setDepth(int depthIn) {
- depth = depthIn;
- }
- /**
- * @return Returns the maximum number of fetcher threads used
- */
- public int getThreads() {
- return threads;
- }
- /**
- * @param threadsIn the maximum number of fetcher threads used
- */
- public void setThreads(int threadsIn) {
- threads = threadsIn;
- }
- /**
- * @return Returns the maximum number of "out links" to follow in any given page
- */
- public long getTopN() {
- return topN;
- }
- /**
- * @param topNIn the maximum number of "out links" to follow in any given page
- */
- public void setTopN(long topNIn) {
- topN = topNIn;
- }
-
- protected String getDate() {
- return new SimpleDateFormat("yyyyMMddHHmmss").format(
- new Date(System.currentTimeMillis()));
- }
-
- /**
- * Uses nutch to crawl a list of urls defined in "inputUrlFile"
- * Temporary scratch storage is held at "tmpCrawlDir"
- * The desired output is an index of crawled pages, stored at "outputIndexDir"
- *
- * @return true when urls successfully crawled/indexed
- */
- @SuppressWarnings("deprecation")
- public boolean crawl() throws IOException {
- log.info("Performing crawl with following config options: ");
- log.info("inputUrlFile = " + inputUrlFile);
- log.info("tmpCrawlDir = " + tmpCrawlDir);
- log.info("outputIndexDir = " + outputIndexDir);
- log.info("threads = " + threads);
- log.info("depth = " + depth);
- log.info("topN = " + topN);
-
- Path inputPath = new Path(inputUrlFile);
- Path linkDb = new Path(tmpCrawlDir + "/linkdb");
- Path segments = new Path(tmpCrawlDir + "/segments");
- Path indexes = new Path(tmpCrawlDir + "/indexes");
- Path index = new Path(outputIndexDir);
- Path crawlDb = new Path(tmpCrawlDir + "/crawldb");
-
- Path tmpDir = job.getLocalPath("crawl" + Path.SEPARATOR + getDate());
- Injector injector = new Injector(conf);
- Generator generator = new Generator(conf);
- Fetcher fetcher = new Fetcher(conf);
- ParseSegment parseSegment = new ParseSegment(conf);
- CrawlDb crawlDbTool = new CrawlDb(conf);
- LinkDb linkDbTool = new LinkDb(conf);
- Indexer indexer = new Indexer(conf);
- DeleteDuplicates dedup = new DeleteDuplicates(conf);
- IndexMerger merger = new IndexMerger(conf);
-
- FileSystem fs = FileSystem.get(job);
-
- log.info("Create a new database (crawlDB) of link information");
- injector.inject(crawlDb, inputPath);
-
- for (int i = 0; i < depth; i++) {
- log.info("Generate a fetch list from info in the crawlDB.");
- Path segment = generator.generate(crawlDb, segments, -1, topN,
- System.currentTimeMillis());
- if (segment == null) {
- log.info("Stopping at depth = " + i + " instead of "
+ (depth - 1) +
- " - no more URLs to fetch.");
- break;
- }
- log.info("Fetch links");
- fetcher.fetch(segment, threads); // fetch it
- if (!Fetcher.isParsing(job)) {
- log.info("Parsing Segment");
- parseSegment.parse(segment); // parse it, if needed
- }
- Path[] p = new Path[1];
- p[0] = segment;
- log.info("Update CrawlDB");
- crawlDbTool.update(crawlDb, p, true, true); // update crawldb
- }
-
- linkDbTool.invert(linkDb, segments, true, true, false); // invert links
-
- // Delete old indexes
- if (fs.exists(indexes)) {
- log.info("Deleting old indexes: $indexes");
- fs.delete(indexes);
- }
-
- // Delete old index
- if (fs.exists(index)) {
- log.info("Deleting old merged index: $index");
- fs.delete(index);
- }
-
- // index, dedup & merge
- indexer.index(indexes, crawlDb, linkDb,
- fs.listPaths(segments, HadoopFSUtil.getPassAllFilter()));
-
- Path[] p = new Path[1];
- p[0] = indexes;
- dedup.dedup(p);
- merger.merge(fs.listPaths(indexes, HadoopFSUtil.getPassAllFilter()),
- index, tmpDir);
- log.info("Crawl finished");
- return true;
- }
-
-
- /**
- * Performs a web crawl:
- * @param args command line arguments
- */
- public static void main(String[] args) throws IOException {
-
- WebCrawl wCrawl = new WebCrawl();
-
- Options options = new Options();
- options.addOption("i", "inputUrlFile", true,
- "file holding 'urls' file to seed web page crawling");
- options.addOption("o", "outputDir", true, "temp crawl
output dir");
- options.addOption("x", "docsIndexDir", true, "docs index
output dir");
- options.addOption("t", "threads", true, "number of
threads");
- options.addOption("d", "depth", true, "depth to recurse
towards");
- options.addOption("n", "topN", true, "maximum number of
out links to follow");
- options.addOption("h", "help", false, "print help
message");
-
- CommandLineParser parser = new PosixParser();
- try {
- CommandLine line = parser.parse(options, args);
-
- if (line.hasOption("h")) {
- HelpFormatter formatter = new HelpFormatter();
- formatter.printHelp("WebCrawl", options);
- return;
- }
- if (line.hasOption("i")) {
- wCrawl.setInputUrlFile(line.getOptionValue("i"));
- }
- if (line.hasOption("x")) {
- wCrawl.setOutputIndexDir(line.getOptionValue("x"));
- }
- if (line.hasOption("o")) {
- wCrawl.setTmpCrawlDir(line.getOptionValue("o"));
- }
- if (line.hasOption("t")) {
- wCrawl.setThreads(Integer.parseInt(line.getOptionValue("t")));
- }
- if (line.hasOption("d)")) {
- wCrawl.setDepth(Integer.parseInt(line.getOptionValue("d")));
- }
- if (line.hasOption("n")) {
- wCrawl.setTopN(Integer.parseInt(line.getOptionValue("n")));
- }
-
- }
- catch (ParseException exp) {
- System.err.println("Parsing failed. Reason: " +
exp.getMessage());
- }
-
- if (!wCrawl.crawl()) {
- System.err.println("Error -- WebCrawl.crawl() Failed!");
- }
- }
-
-
-}
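
For reference, the class deleted above was essentially a condensed copy of
Nutch's own Crawl driver: inject the seed urls, then loop generate/fetch/parse/
update to a fixed depth before inverting links and building the index. A
hypothetical, stripped-down sketch of that loop, using only the (deprecated)
Nutch calls visible in the deletion; the paths and depth bound are placeholder
values:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.nutch.crawl.CrawlDb;
import org.apache.nutch.crawl.Generator;
import org.apache.nutch.crawl.Injector;
import org.apache.nutch.fetcher.Fetcher;
import org.apache.nutch.parse.ParseSegment;
import org.apache.nutch.util.NutchConfiguration;
import org.apache.nutch.util.NutchJob;

public class CrawlLoopSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = NutchConfiguration.create();
        JobConf job = new NutchJob(conf);
        Path crawlDb = new Path("/tmp/crawl/crawldb");   // placeholder paths
        Path segments = new Path("/tmp/crawl/segments");

        // Seed the crawl db, then loop to a fixed depth.
        new Injector(conf).inject(crawlDb, new Path("nutch/urls"));
        for (int i = 0; i < 5; i++) {
            Path segment = new Generator(conf).generate(crawlDb, segments, -1,
                    Long.MAX_VALUE, System.currentTimeMillis());
            if (segment == null) {
                break; // no more URLs to fetch
            }
            new Fetcher(conf).fetch(segment, job.getInt("fetcher.threads.fetch", 10));
            if (!Fetcher.isParsing(job)) {
                new ParseSegment(conf).parse(segment); // parse only if the fetcher didn't
            }
            new CrawlDb(conf).update(crawlDb, new Path[] {segment}, true, true);
        }
    }
}
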
diff --git a/search-server/spacewalk-search/src/java/com/redhat/satellite/search/scheduler/tasks/IndexDocumentsTask.java b/search-server/spacewalk-search/src/java/com/redhat/satellite/search/scheduler/tasks/IndexDocumentsTask.java
deleted file mode 100644
index 9e36602..0000000
--- a/search-server/spacewalk-search/src/java/com/redhat/satellite/search/scheduler/tasks/IndexDocumentsTask.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * Copyright (c) 2008--2010 Red Hat, Inc.
- *
- * This software is licensed to you under the GNU General Public License,
- * version 2 (GPLv2). There is NO WARRANTY for this software, express or
- * implied, including the implied warranties of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
- * along with this software; if not, see
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * Red Hat trademarks are not licensed under GPLv2. No permission is
- * granted to use or replicate Red Hat trademarks that are incorporated
- * in this software or its documentation.
- */
-package com.redhat.satellite.search.scheduler.tasks;
-
-import com.redhat.satellite.search.config.Configuration;
-import com.redhat.satellite.search.index.IndexManager;
-import com.redhat.satellite.search.scheduler.tasks.crawl.WebCrawl;
-
-import org.apache.log4j.Logger;
-
-import org.quartz.JobDataMap;
-import org.quartz.StatefulJob;
-import org.quartz.JobExecutionContext;
-import org.quartz.JobExecutionException;
-
-import java.io.File;
-import java.io.IOException;
-
-
-/**
- * Task to index help documents
- *
- * @version $Rev: $
- */
-public class IndexDocumentsTask implements StatefulJob {
- // We do _not_ want this task to run concurrently with itself,
- // therefore using StatefulJob.
- private static Logger log = Logger.getLogger(IndexDocumentsTask.class);
- public static final String TASK_REINDEX = new String("TASK.RE-INDEX");
- public static final String TASK_COMPLETE = new String("TASK.COMPLETE");
-
-
- /**
- * {@inheritDoc}
- */
- public void execute(JobExecutionContext ctx)
- throws JobExecutionException {
- JobDataMap jobData = ctx.getJobDetail().getJobDataMap();
- IndexManager indexManager = (IndexManager) jobData.get("indexManager");
-
- String indexWorkDir = indexManager.getIndexWorkDir();
- if ((indexWorkDir == null) || (indexWorkDir.compareTo("") == 0)) {
- throw new JobExecutionException("indexWorkDir invalid");
- }
- File docsIndexDir = new File(indexWorkDir + File.separator +
- IndexManager.DOCS_INDEX_NAME);
- File indexCheck = new File(docsIndexDir, TASK_REINDEX);
- File success = new File(docsIndexDir, TASK_COMPLETE);
-
- boolean reindex = false;
-
- log.info("task running");
-
- if (!docsIndexDir.exists()) {
- // first time running, the index needs to be created from scratch
- log.info("Creating directory: " + docsIndexDir.getPath());
- if (!docsIndexDir.mkdirs()) {
- throw new JobExecutionException("Unable to create dir: " +
- docsIndexDir.getPath());
- }
- reindex = true;
- }
- else {
- // Will reindex if a TASK.RE-INDEX is present or
- // we don't see TASK.COMPLETE
- log.info("docsIndexDir<" + docsIndexDir + ">
exists");
- if (indexCheck.exists()) {
- log.info("Found (" + indexCheck.getPath() +
- "), Index needs to be updated.");
- reindex = true;
- }
- else if (!success.exists()) {
- log.info("Index exists, but it appears to be incomplete, missing
" +
- success.getPath());
- reindex = true;
- }
- }
-
- if (reindex) {
- //We need to communicate index is incomplete by removing the success file
-
- if (success.exists()) {
- if (!success.delete()) {
- throw new JobExecutionException("Unable to delete file: " +
- success.getPath());
- }
- }
- if ((indexCheck != null) && (indexCheck.exists())) {
- // cleanup the file which triggered the re-index
- if (!indexCheck.delete()) {
- throw new JobExecutionException("Unable to delete file: " +
- indexCheck.getPath());
- }
- }
- try {
- Configuration config = (Configuration) jobData.get("configuration");
- if (!reIndexDocs(docsIndexDir.getPath(), config)) {
- log.error("Failed reindexing documents to : ");
- throw new JobExecutionException("Reindexing of Documents
failed.");
- }
- //Mark that docs have been indexed and are ready to be used.
- success.createNewFile();
- }
- catch (IOException e) {
- throw new JobExecutionException(e);
- }
- }
- }
-
- protected boolean reIndexDocs(String docsIndexDir, Configuration config)
- throws IOException {
-
- int threads = config.getInt("search.nutch.threads", 10);
- int depth = config.getInt("search.nutch.depth", 5);
- String inputUrlFile = config.getString("search.nutch.inputUrlFile", "nutch/urls");
- String tmpCrawlDir = config.getString("search.nutch.tmpDir", "/tmp/crawl_output");
-
- WebCrawl wCrawl = new WebCrawl();
- wCrawl.setDepth(depth);
- wCrawl.setInputUrlFile(inputUrlFile);
- wCrawl.setOutputIndexDir(docsIndexDir);
- wCrawl.setThreads(threads);
- wCrawl.setTmpCrawlDir(tmpCrawlDir);
-
- return wCrawl.crawl();
- }
-
-}
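
The removed task coordinated re-indexing through two marker files in the docs
index directory: TASK.RE-INDEX requests a rebuild, and TASK.COMPLETE records a
finished one (its absence means the previous run died mid-index). A standalone
sketch of that decision logic, with a placeholder path; the file names mirror
the deleted constants:

import java.io.File;

public class ReindexCheckSketch {

    // Rebuild when the index has never been created, a re-index was requested,
    // or the previous run never wrote its completion marker.
    static boolean needsReindex(File indexDir) {
        if (!indexDir.exists()) {
            return true;
        }
        File reindexMarker = new File(indexDir, "TASK.RE-INDEX");
        File completeMarker = new File(indexDir, "TASK.COMPLETE");
        return reindexMarker.exists() || !completeMarker.exists();
    }

    public static void main(String[] args) {
        System.out.println(needsReindex(new File("/tmp/docs_index"))); // placeholder path
    }
}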