This article gives a detailed walkthrough of implementing a simple web crawler in C#; readers interested in the topic may find it a useful reference.
The complete example code is shared below for reference. The details are as follows.
HTTP request utility class:
Features:
1. Fetch the html of a web page
2. Download images from the web
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;

namespace Utils
{
    /// <summary>
    /// HTTP request utility class
    /// </summary>
    public class HttpRequestUtil
    {
        /// <summary>
        /// Get the html of a page
        /// </summary>
        public static string GetPageHtml(string url)
        {
            // Set up the request
            HttpWebRequest request = WebRequest.Create(url) as HttpWebRequest;
            request.UserAgent = "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)";
            // The request is not actually sent until GetResponse() is called
            using (HttpWebResponse response = request.GetResponse() as HttpWebResponse)
            using (Stream responseStream = response.GetResponseStream())
            using (StreamReader sr = new StreamReader(responseStream, Encoding.UTF8))
            {
                // Return the html source of the page
                string content = sr.ReadToEnd();
                return content;
            }
        }

        /// <summary>
        /// Download a file over http
        /// </summary>
        public static void HttpDownloadFile(string url)
        {
            // Derive the local file name from the last segment of the url
            int pos = url.LastIndexOf("/") + 1;
            string fileName = url.Substring(pos);
            string path = Path.Combine(Application.StartupPath, "download");
            if (!Directory.Exists(path))
            {
                Directory.CreateDirectory(path);
            }
            string filePathName = Path.Combine(path, fileName);
            if (File.Exists(filePathName)) return;

            // Set up the request
            HttpWebRequest request = WebRequest.Create(url) as HttpWebRequest;
            request.UserAgent = "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)";
            request.Proxy = null;
            // The request is not actually sent until GetResponse() is called
            using (HttpWebResponse response = request.GetResponse() as HttpWebResponse)
            using (Stream responseStream = response.GetResponseStream())
            // Create the local file and copy the response stream into it in 1 KB chunks
            using (Stream stream = new FileStream(filePathName, FileMode.Create))
            {
                byte[] bArr = new byte[1024];
                int size = responseStream.Read(bArr, 0, bArr.Length);
                while (size > 0)
                {
                    stream.Write(bArr, 0, size);
                    size = responseStream.Read(bArr, 0, bArr.Length);
                }
            }
        }
    }
}
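For context, here is a minimal usage sketch (not part of the original listing). It assumes a project that references System.Windows.Forms (needed for Application.StartupPath inside HttpDownloadFile) and uses a hypothetical start page: it fetches the page html with GetPageHtml, pulls image urls out of <img src="..."> tags with a rough regex (for illustration only, not a full html parser), and saves each image with HttpDownloadFile.

using System;
using System.Text.RegularExpressions;
using Utils;

class CrawlerDemo
{
    static void Main()
    {
        // Hypothetical start page; replace with the site you actually want to crawl
        string url = "http://www.example.com/";

        // 1. Fetch the page html
        string html = HttpRequestUtil.GetPageHtml(url);

        // 2. Extract image urls from <img src="..."> tags (rough regex for illustration)
        foreach (Match m in Regex.Matches(html, "<img[^>]+src=[\"']([^\"']+)[\"']", RegexOptions.IgnoreCase))
        {
            string imgUrl = m.Groups[1].Value;
            if (imgUrl.StartsWith("http", StringComparison.OrdinalIgnoreCase))
            {
                // 3. Save each image into the download folder next to the executable
                HttpRequestUtil.HttpDownloadFile(imgUrl);
            }
        }
    }
}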