/**
 * Sends an HTTP GET request to {@code url + "?" + param} and returns the
 * response body as a single string (line breaks removed).
 *
 * @param url   base URL, e.g. "http://example.com/api"
 * @param param query string without the leading '?', e.g. "key1=v1&key2=v2"
 * @return the response body, or "" if an IOException occurred
 */
public static String SendGET(String url, String param) {
    String result = "";          // accumulated response body
    BufferedReader read = null;  // reader over the response stream
    try {
        // Build the full request URL (query string appended after '?').
        URL realurl = new URL(url + "?" + param);
        // Open the connection (not yet connected).
        URLConnection connection = realurl.openConnection();
        // Set common request headers.
        connection.setRequestProperty("accept", "*/*");
        connection.setRequestProperty("connection", "Keep-Alive");
        connection.setRequestProperty("user-agent",
                "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1;SV1)");
        // Establish the connection.
        connection.connect();
        // Dump all response header fields (useful for cookies etc.).
        Map<String, List<String>> map = connection.getHeaderFields();
        for (String key : map.keySet()) {
            System.out.println(key + "--->" + map.get(key));
        }
        // Read the response body line by line as UTF-8.
        read = new BufferedReader(new InputStreamReader(
                connection.getInputStream(), "UTF-8"));
        StringBuilder sb = new StringBuilder();
        String line;
        while ((line = read.readLine()) != null) {
            sb.append(line);
        }
        result = sb.toString();
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        // Always close the reader, even on failure.
        if (read != null) {
            try {
                read.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
    return result;
}
// Snippet: read a web page line by line and save it to a local file.
URL aurl = new URL(url);
BufferedReader br = new BufferedReader(new InputStreamReader(aurl
        .openStream()));
FileWriter fw = new FileWriter("d:/a.html");
String line;
// Check for EOF *before* writing, so we never pass null to fw.write().
while ((line = br.readLine()) != null) {
    fw.write(line);
}
fw.flush();
fw.close();
br.close();
//读取网页上的内容方法---------------------20100125 public String getOneHtml(String htmlurl) throws IOException {
URL url;
String temp;
final StringBuffer sb = new StringBuffer();
try {
url = new URL(htmlurl);
// 读取网页全部内容
final BufferedReader in = new BufferedReader(new InputStreamReader(
urlopenStream(),"GBK"));
while ((temp = inreadLine()) != null) {
sbappend(temp);
}
inclose();
} catch (final MalformedURLException me) {
Systemoutprintln("你输入的URL格式有问题!请仔细输入");
megetMessage();
} catch (final IOException e) {
eprintStackTrace();
}
return sbtoString();
}上面这个方法是根据你传入的url爬取整个网页的内容,然后你写个正则表达式去匹配这个字符串的内容。
以上就是关于“Java请求一个URL，获取网站返回的数据”的全部内容，包括：Java请求一个URL并获取网站返回的数据、Java程序怎么读取HTML网页、用Java写爬虫程序时有个网站获取不到链接求指导等相关内容解答。如果想了解更多相关内容，可以关注我们，你们的支持是我们更新的动力！
欢迎分享,转载请注明来源:内存溢出
评论列表(0条)