I have been learning Go recently and used regular expressions while scraping website data, so here is a summary.
Go's regular expressions use RE2 syntax (I'm honestly not sure of the details myself).
Characters
- `.` matches any single character, e.g. `abc.` matches abcd, abcx, abc9
- `[]` matches any one of the characters inside the brackets, e.g. `[abc]d` matches ad, bd, cd
- `-` inside `[]` denotes a range, e.g. `[A-Za-z0-9]`
- `^` inside `[]` excludes the listed characters, e.g. `[^xy]a` matches aa or da, but not xa or ya
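A minimal sketch checking these character rules with Go's regexp package (the patterns and inputs are just illustrations):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// `.` matches any single character
	fmt.Println(regexp.MustCompile(`abc.`).MatchString("abcx")) // true
	// `[abc]d` matches only ad, bd or cd
	fmt.Println(regexp.MustCompile(`[abc]d`).MatchString("cd")) // true
	fmt.Println(regexp.MustCompile(`[abc]d`).MatchString("1d")) // false
	// `-` gives a range inside []
	fmt.Println(regexp.MustCompile(`[A-Za-z0-9]`).MatchString("G")) // true
	// `^` inside [] excludes the listed characters
	fmt.Println(regexp.MustCompile(`[^xy]a`).MatchString("da")) // true
	fmt.Println(regexp.MustCompile(`[^xy]a`).MatchString("xa")) // false
}
```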
Quantifiers

- `?` matches the preceding unit 0 or 1 times
- `+` matches the preceding unit 1 or more times
- `*` matches the preceding unit 0 or more times
- `{m,n}` sets lower and upper bounds on the repeat count, e.g. an IP address: `[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}`
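Since `.` is itself a metacharacter, the dots in the IP pattern must be escaped with `\.`. A quick sketch (the sample addresses are made up):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// \. matches a literal dot; {1,3} allows one to three digits per field
	ip := regexp.MustCompile(`[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}`)
	fmt.Println(ip.MatchString("192.168.1.1"))       // true
	fmt.Println(ip.FindString("host 10.0.0.255 up")) // 10.0.0.255
	fmt.Println(ip.MatchString("no address here"))   // false
}
```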
Other

- `\` is the escape character
- `|` means "or" (alternation)
- `()` groups characters into a single unit; when the target string itself contains parentheses, match them as literals, e.g. `[(] aaa. [)]`
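A short sketch of alternation, grouping, and literal parentheses (inputs are made up):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// | picks either alternative
	fmt.Println(regexp.MustCompile(`cat|dog`).FindString("hotdog")) // dog
	// () groups a unit, so the quantifier applies to the whole thing
	fmt.Println(regexp.MustCompile(`(ab)+`).MatchString("ababab")) // true
	// literal parentheses: put them in a class like [(] [)], or escape them with \
	re := regexp.MustCompile(`[(](.*?)[)]`)
	fmt.Println(re.FindStringSubmatch("score(95)")[1]) // 95
}
```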
Methods

```go
// takes a pattern string and returns a *regexp.Regexp
str := regexp.MustCompile(pattern)
// takes the data to search and a match count (-1 means all matches);
// returns a [][]string holding each full match together with its capture groups
var result [][]string = str.FindAllStringSubmatch(data, -1)
```
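For instance, with a hypothetical two-group pattern the returned [][]string looks like this:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	str := regexp.MustCompile(`(\w+)@(\w+)\.com`)
	data := "a@foo.com b@bar.com"
	// -1 means all matches; each entry is [full match, group 1, group 2, ...]
	result := str.FindAllStringSubmatch(data, -1)
	for _, m := range result {
		fmt.Println(m[0], "->", m[1], m[2])
	}
	// a@foo.com -> a foo
	// b@bar.com -> b bar
}
```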
Crawler
Scrape the read count, comment count, and digg (recommendation) count of every post on cnblogs (博客园):
```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"regexp"
	"strconv"
)

var readCount int = 0
var commentCount int = 0
var diggCount int = 0

// HttpGet reads the page at url and returns its body as result
func HttpGet(url string) (result string, err error) {
	resp, err1 := http.Get(url)
	if err1 != nil {
		err = err1
		return
	}
	defer resp.Body.Close()
	buf := make([]byte, 4096)
	for {
		n, err2 := resp.Body.Read(buf)
		if n == 0 {
			break
		}
		if err2 != nil && err2 != io.EOF {
			err = err2
			return
		}
		result += string(buf[:n])
	}
	return
}

// SpiderPageDB scrapes one list page and adds its counts to the totals.
// NOTE: the original post is truncated around here; the url below is a
// placeholder for the blog's list page, and the first pattern is assumed
// by analogy with the two patterns that follow.
func SpiderPageDB(index int, page chan int) {
	url := "https://www.cnblogs.com/xxx/default.html?page=" + strconv.Itoa(index) // placeholder
	result, err := HttpGet(url)
	if err != nil {
		fmt.Println("HttpGet err:", err)
		page <- index // still signal so working doesn't block
		return
	}
	// read counts
	str := regexp.MustCompile(`post-view-count">阅读[(](?s:(.*?))[)]</span>`)
	alls := str.FindAllStringSubmatch(result, -1)
	for _, j := range alls {
		temp, err := strconv.Atoi(j[1])
		if err != nil {
			fmt.Println("string2int err:", err)
		}
		readCount += temp
	}
	// comment counts
	str = regexp.MustCompile(`post-comment-count">评论[(](?s:(.*?))[)]</span>`)
	alls = str.FindAllStringSubmatch(result, -1)
	for _, j := range alls {
		temp, err := strconv.Atoi(j[1])
		if err != nil {
			fmt.Println("string2int err:", err)
		}
		commentCount += temp
	}
	// digg counts
	str = regexp.MustCompile(`post-digg-count">推荐[(](?s:(.*?))[)]</span>`)
	alls = str.FindAllStringSubmatch(result, -1)
	for _, j := range alls {
		temp, err := strconv.Atoi(j[1])
		if err != nil {
			fmt.Println("string2int err:", err)
		}
		diggCount += temp
	}
	page <- index
}

// working spawns one goroutine per page and waits for all of them
func working(start, end int) {
	fmt.Printf("crawling pages %d to %d...\n", start, end)
	// the channel lets each goroutine tell the main goroutine it is done
	page := make(chan int)
	// scrape pages concurrently
	for i := start; i <= end; i++ {
		go SpiderPageDB(i, page)
	}
	for i := start; i <= end; i++ {
		fmt.Printf("fetched page %d\n", <-page)
	}
	// the original snippet is cut off here; printing the totals is the
	// natural use of the three counters above
	fmt.Println("reads:", readCount, "comments:", commentCount, "diggs:", diggCount)
}

func main() {
	var start, end int
	fmt.Print("enter the start page: ")
	fmt.Scan(&start)
	fmt.Print("enter the end page: ")
	fmt.Scan(&end)
	working(start, end)
}
```

Supplement: a classic regex + Go crawler example, scraping douban top250

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"regexp"
	"strconv"
)

// savToFile writes one page's film names and scores to a text file
func savToFile(index int, filmName, filmScore [][]string) {
	f, err := os.Create("第" + strconv.Itoa(index) + "页.txt")
	if err != nil {
		fmt.Println("os create err", err)
		return
	}
	defer f.Close()
	n := len(filmName)
	// header row: name, score
	f.WriteString("电影名称" + "\t\t\t" + "评分" + "\n")
	for i := 0; i < n; i++ {
		f.WriteString(filmName[i][1] + "\t\t\t" + filmScore[i][1] + "\n")
	}
}

func main() {
	var start, end int
	fmt.Print("enter the start page: ")
	fmt.Scan(&start)
	fmt.Print("enter the end page: ")
	fmt.Scan(&end)
	working(start, end)
}

func working(start, end int) {
	fmt.Printf("crawling pages %d to %d\n", start, end)
	for i := start; i <= end; i++ {
		SpiderPage(i)
	}
}

// SpiderPage scrapes one douban page and saves the result to a file
func SpiderPage(index int) {
	// each page lists 25 films
	url := "https://movie.douban.com/top250?start=" + strconv.Itoa((index-1)*25) + "&filter="
	result, err := HttpGet(url)
	if err != nil {
		fmt.Println("httpget err", err)
		return
	}
	// parse film names
	ret := regexp.MustCompile(`<img width="100" alt="(?s:(.*?))"`)
	filmName := ret.FindAllStringSubmatch(result, -1)
	for _, name := range filmName {
		fmt.Println("name", name[1])
	}
	// parse scores
	ret2 := regexp.MustCompile(`<span class="rating_num" property="v:average">(?s:(.*?))<`)
	filmScore := ret2.FindAllStringSubmatch(result, -1)
	for _, score := range filmScore {
		fmt.Println("score", score[1])
	}
	savToFile(index, filmName, filmScore)
}

// HttpGet fetches url and returns the page body as result
func HttpGet(url string) (result string, err error) {
	req, _ := http.NewRequest("GET", url, nil)
	// a plain http.Get is flagged as a crawler by douban (it returns HTTP 418),
	// so the request must carry a browser User-Agent header
	req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36 OPR/66.0.3515.115")
	resp, err1 := (&http.Client{}).Do(req)
	if err1 != nil {
		err = err1
		return
	}
	defer resp.Body.Close()
	buf := make([]byte, 4096)
	// read the whole page in a loop
	for {
		n, err2 := resp.Body.Read(buf)
		if n == 0 {
			break
		}
		if err2 != nil && err2 != io.EOF {
			err = err2
			return
		}
		result += string(buf[:n])
	}
	return
}
```
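To see what the two douban patterns above actually capture, here is a minimal sketch that runs them against a hand-written fragment (the fragment only imitates the real page markup; the patterns are the ones from SpiderPage):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// hand-written fragment mimicking the douban list markup (not real page data)
	html := `<img width="100" alt="肖申克的救赎" src="x.jpg">
<span class="rating_num" property="v:average">9.7</span>`

	name := regexp.MustCompile(`<img width="100" alt="(?s:(.*?))"`)
	score := regexp.MustCompile(`<span class="rating_num" property="v:average">(?s:(.*?))<`)

	fmt.Println(name.FindAllStringSubmatch(html, -1)[0][1])  // 肖申克的救赎
	fmt.Println(score.FindAllStringSubmatch(html, -1)[0][1]) // 9.7
}
```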









