<!-- The crawler only supports JDK 1.8 -->
<!-- Dependency required by the crawler -->
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>4.5.2</version>
</dependency>
<!-- Logging dependency required by the crawler -->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>1.7.25</version>
</dependency>
Location of the crawler's configuration file: slf4j-log4j12 binds to log4j, which reads a log4j.properties file from the classpath, so in a Maven project the file is placed under src/main/resources.
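A minimal log4j.properties sketch for that location; the appender name A1 and the output pattern are illustrative assumptions, not taken from the original notes:

# Minimal log4j.properties sketch -- appender name and pattern are illustrative assumptions
log4j.rootLogger=DEBUG, A1
log4j.appender.A1=org.apache.log4j.ConsoleAppender
log4j.appender.A1.layout=org.apache.log4j.PatternLayout
log4j.appender.A1.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} [%t] [%c]-[%p] %m%n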
package day02;

import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;

import java.io.IOException;
import java.net.URISyntaxException;

public class pacohngde {
    public static void main(String[] args) throws IOException, URISyntaxException {
        // This example fetches the full HTML of a search-results page.
        // 1. "Open the browser": create an HttpClient instance
        CloseableHttpClient httpClient = HttpClients.createDefault();

        // Target URL: https://search.bilibili.com/all?keyword=药水哥&search_source=1
        // URIBuilder assembles the base URL and the query parameters of the search
        URIBuilder uriBuilder = new URIBuilder("https://search.bilibili.com/all");
        uriBuilder.setParameter("keyword", "药水哥").setParameter("search_source", "1");

        // 2. "Type the address": create an HttpGet request with the URL to crawl
        HttpGet httpGet = new HttpGet(uriBuilder.build());
        System.out.println("URL to crawl: " + httpGet);

        // 3. "Press Enter": send the request with the HttpClient and get the response
        CloseableHttpResponse response = httpClient.execute(httpGet);

        // 4. Parse the response: status code 200 means success, anything else is an error
        if (response.getStatusLine().getStatusCode() == 200) {
            // Get the response body
            HttpEntity httpEntity = response.getEntity();
            // Decode the body as UTF-8 text
            String content = EntityUtils.toString(httpEntity, "utf8");
            // Print the crawled page
            System.out.println(content);
        }

        // Release the response
        response.close();
        // Close the client ("close the browser")
        httpClient.close();
    }
}
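As a possible refinement that is not part of the original notes, the same request can be written with try-with-resources so the response and the client are closed even if reading the body throws; the class name CrawlerTryWithResources below is illustrative.

package day02;

import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;

public class CrawlerTryWithResources {
    public static void main(String[] args) throws Exception {
        // Same search URL as above, built with URIBuilder
        URIBuilder uriBuilder = new URIBuilder("https://search.bilibili.com/all")
                .setParameter("keyword", "药水哥")
                .setParameter("search_source", "1");

        // try-with-resources closes the client and the response automatically,
        // even when an exception is thrown while parsing the body
        try (CloseableHttpClient httpClient = HttpClients.createDefault();
             CloseableHttpResponse response = httpClient.execute(new HttpGet(uriBuilder.build()))) {
            if (response.getStatusLine().getStatusCode() == 200) {
                System.out.println(EntityUtils.toString(response.getEntity(), "utf8"));
            }
        }
    }
}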