Map urlMap = new HashMap();
extractHttpUrls(urlMap, text);
extractRelativeUrls(urlMap, text, crawlerUrl);
return new ArrayList(urlMap.keySet());
}
private void extractHttpUrls(Map urlMap, String text) {
Matcher m = (text);
while (m.find()) {
String url = m.group();
String[] terms = url.split("a href=https://www.04ip.com/"");
for (String term : terms) {
// System.out.println("Term = " + term);
if (term.startsWith("http")) {
int index = term.indexOf("\"");
if (index0) {
term = term.substring(0, index);
}
urlMap.put(term, term);
System.out.println("Hyperlink: " + term);
}
}
}
}
private void extractRelativeUrls(Map urlMap, String text,
CrawlerUrl crawlerUrl) {
Matcher m = relativeRegexp.matcher(text);
URL textURL = crawlerUrl.getURL();
String host = textURL.getHost();
while (m.find()) {
String url = m.group();
String[] terms = url.split("a href=https://www.04ip.com/"");
for (String term : terms) {
if (term.startsWith("/")) {
int index = term.indexOf("\"");
if (index0) {
term = term.substring(0, index);
}
String s = //" + host + term;
urlMap.put(s, s);
System.out.println("Relative url: " + s);
}
}
}
}
/**
 * Demo entry point: seeds the crawl queue with a single starting URL and
 * runs a NaiveCrawler (max 100 URLs, depth 5, 1000 ms politeness delay)
 * that only keeps pages matching the "java" regexp.
 *
 * NOTE(review): the seed URL is empty here — presumably lost in
 * transcription; supply a real starting URL before running.
 */
public static void main(String[] args) {
    try {
        String url = "";
        // Generified the raw Queue/LinkedList declaration; behavior unchanged.
        Queue<CrawlerUrl> urlQueue = new LinkedList<>();
        String regexp = "java";
        urlQueue.add(new CrawlerUrl(url, 0));
        NaiveCrawler crawler = new NaiveCrawler(urlQueue, 100, 5, 1000L,
                regexp);
        crawler.crawl();
    } catch (Throwable t) {
        System.out.println(t.toString());
        t.printStackTrace();
    }
}
【java爬虫代码解读 java爬虫技术原理】java爬虫代码解读的介绍就聊到这里吧,感谢你花时间阅读本站内容 , 更多关于java爬虫技术原理、java爬虫代码解读的信息别忘了在本站进行查找喔 。
推荐阅读
- uipath直播安装教程,uipath社区版安装流程
- pg348q底座怎么安装,pg348q拆支架
- 默认浏览器下载,默认浏览器下载的文件在哪
- 高跷角色扮演游戏,高跷人游戏
- 命令分割linux文件 linux 分割文本文件
- mysql中update更新多个字段,mysql 更新多个字段
- 怎么看独立显卡和集成显卡,怎么看是独立显卡还是集成
- java代码下载积分 java积分管理系统
- 如何营销茶店,茶店营销策略