The code below was written by the teacher; Parallel Python, for example, is used for distributed computing
2019-12-19

Parallel programming, simply put, is about handling multiple tasks, and you can do it with multiple processes or multiple threads.
The key packages are threading and multiprocessing. There are two main ways to think about parallel programs: one is MapReduce, divide and conquer; the other is the pipeline, Producer-Consumer (a small sketch of that pattern follows below).
Once we have learned parallel programming we can use it for distributed computing; Parallel Python, for example, is built for distributed computing.
Celery + RabbitMQ/Redis distributed task queues, combined with Django, can implement fast asynchronous task queues.
Gevent is for efficient asynchronous IO and coroutines.
I don't understand any of the above yet.
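
Just to make the Producer-Consumer idea concrete, here is a minimal sketch of my own (not from the course), in the same Python 2 style as the code below: one thread produces work items into a Queue, another consumes them until it sees a None sentinel.

from threading import Thread
from Queue import Queue

def producer(q):
    for i in xrange(5):
        q.put(i)           # hand each work item to the consumer
    q.put(None)            # sentinel: no more work

def consumer(q):
    while True:
        item = q.get()
        if item is None:   # sentinel received, stop consuming
            break
        print "consumed", item

q = Queue()
workers = [Thread(target=producer, args=(q,)),
           Thread(target=consumer, args=(q,))]
for w in workers:
    w.start()
for w in workers:
    w.join()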

Now let's look again at yesterday's crawler.
First the code. I made a small mistake today, couldn't see where the problem was, and still had to track it down. (I have just found the error; the working code is further down.)

#!/usr/bin/env python
# coding: utf-8
#copyRight by heibanke

import urllib
import os
import re
from threading import Thread
import time

def downloadURL(urls,dirpath):
    for url in urls:
        if len(url)>0:
            content = urllib.urlopen(url).read()
            if not os.path.exists(dirpath):
                os.makedirs(dirpath)
            open(dirpath+r'/'+url[-26],'w').write(content)  # BUG: missing a colon after -26 (should be url[-26:]), so the same one-character filename gets overwritten again and again.

def parseTarget(url):
    urls=[]
    content=urllib.urlopen(url).read()
    pattern = r'<a title=(.*?) href="(.*?)">'
    hrefs = re.findall(pattern,content)

    for href in hrefs:
        urls.append(href[1])
    return urls

def thread_job(n,Thread,url_list,job):
    local_time = time.time()
    threads = [Thread(target=job,args=(url_list[i],str(n)+Thread.__name__)) for i in xrange(n)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print n,Thread.__name__,"run job need",time.time()-local_time

if __name__=="__main__":
    t=time.time()
    urls=[]
    for i in xrange(7):
        urls.extend(parseTarget('http://blog.sina.com.cn/s/articlelist_1191258123_0_'+str(i+1)+'.html'))

    url_len = len(urls)

    print "total urls number is ",url_len

    for n in [4]:
        url_list=[]
        url_split_len = url_len//n
        for i in xrange(n):
            if i in xrange(n):  # this check is redundant (accidentally written twice)!
                if i==n-1:
                    url_list.append(urls[i*url_split_len:url_len])
                else:
                    url_list.append(urls[i*url_split_len:(i+1)*url_split_len])
        thread_job(n,Thread,url_list,downloadURL)
    print "All done in",time.time()-t

Let's start with parseTarget. It uses the re module to save the article links on the current page into a list. First a urls list is defined; content is the page source read from the url (adding a print content after that line would dump the page source). pattern is a regular expression (re.compile is not used here; a compiled variant is sketched below), and hrefs holds the tuples that matched on the page. The for loop then keeps appending each matched link to the list, which is returned at the end. That is the whole function.
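
For comparison, a compiled variant would look like the sketch below (my own illustration; parseTarget_compiled is a hypothetical name). Compiling once pays off when the same pattern is matched against many pages.

import re

LINK_PATTERN = re.compile(r'<a title=(.*?) href="(.*?)">')

def parseTarget_compiled(content):
    # findall on a compiled pattern returns the same (title, href) tuples
    return [href for title, href in LINK_PATTERN.findall(content)]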

The result of running this was completely wrong: the program simply wasn't writing files into the folder.
The code below was written by the teacher, but for his timing tests he wrote both thread and process versions. I wanted to strip out the process part, got stuck after several attempts, and in the end copied his code verbatim, yet the problem remained. Please take a look at where the problem is; I'd be deeply grateful!

#!/usr/bin/env python
# coding: utf-8
#copyRight by heibanke

import urllib
import os
import re
from threading import Thread
from multiprocessing import Process
import time

def downloadURL(urls,dirpath):
    """
    urls: list of urls to download
    dirpath: local directory to save into
    """
    for url in urls:
        if len(url)>0:
            #print "current process id is ",os.getpid()
            content = urllib.urlopen(url).read()
            if not os.path.exists(dirpath):
                os.makedirs(dirpath)
            open(dirpath+r'/'+url[-26:],'w').write(content)

def parseTarget(url):
    """
    Get the article-list urls from the target url
    """
    urls=[]
    content=urllib.urlopen(url).read()
    pattern = r'<a title=(.*?) href="(.*?)">'
    hrefs = re.findall(pattern,content)

    for href in hrefs:
        urls.append(href[1])

    return urls   

def thread_process_job(n, Thread_or_Process, url_list, job):
    """
    n: number of threads or processes
    Thread_or_Process: the Thread or Process class
    job: the task function to run (the name comes from an earlier countdown demo)
    """
    local_time=time.time()
    threads_or_processes = [Thread_or_Process(target=job,args=(url_list[i],str(n)+Thread_or_Process.__name__)) for i in xrange(n)]
    for t in threads_or_processes:
        t.start()
    for t in threads_or_processes:
        t.join()

    print n,Thread_or_Process.__name__," run job need ",time.time()-local_time

if __name__=="__main__":

    t=time.time()

    urls=[]
    for i in xrange(7):
        urls.extend(parseTarget('http://blog.sina.com.cn/s/articlelist_1191258123_0_'+str(i+1)+'.html'))

    url_len = len(urls)

    print "total urls number is ",url_len

    for n in [8,4,2,1]:
        # split urls into n chunks in url_list
        url_list=[]
        url_split_len = url_len//n
        for i in xrange(n):
            if i==n-1:
                print "*************",len(url_list)
                url_list.append(urls[i*url_split_len:url_len])
                print len(url_list),'%%%%%%%'
            else:
                url_list.append(urls[i*url_split_len:(i+1)*url_split_len])
        # create the threads/processes after splitting the task
        thread_process_job(n,Thread, url_list, downloadURL)
        thread_process_job(n,Process, url_list, downloadURL)

    print "All done in ",time.time()-t


This is what an article link looks like; the browser's inspect-element tool is incredibly powerful.


I have just gotten the code working; the mistakes are called out in comments inside the buggy code above.

#!/usr/bin/env python
# coding: utf-8


import urllib
import os
import re
from threading import Thread
import time

def downloadURL(urls,dirpath):
    for url in urls:
        if len(url)>0:
            content = urllib.urlopen(url).read()
            if not os.path.exists(dirpath):
                os.makedirs(dirpath)
            open(dirpath+r'/'+url[-26:],'w').write(content)

def parseTarget(url):
    urls=[]
    content=urllib.urlopen(url).read()
    pattern = r'<a title=(.*?) href="(.*?)">'
    hrefs = re.findall(pattern,content)

    for href in hrefs:
        urls.append(href[1])
    return urls

def thread_job(n,Thread,url_list,job):
    local_time = time.time()
    threads = [Thread(target=job,args=(url_list[i],str(n)+Thread.__name__)) for i in xrange(n)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print n,Thread.__name__,"run job need",time.time()-local_time

if __name__=="__main__":
    t=time.time()
    urls=[]
    for i in xrange(7):
        urls.extend(parseTarget('http://blog.sina.com.cn/s/articlelist_1191258123_0_'+str(i+1)+'.html'))

    url_len = len(urls)

    print "total urls number is ",url_len

    for n in [4]:
        url_list=[]
        url_split_len = url_len//n
        for i in xrange(n):
            if i==n-1:
                url_list.append(urls[i*url_split_len:url_len])
            else:
                url_list.append(urls[i*url_split_len:(i+1)*url_split_len])

        thread_job(n,Thread,url_list,downloadURL)

    print "All done in",time.time()-t

This version is correct.

def downloadURL(urls,dirpath):
    """
    urls: list of urls to download
    dirpath: local directory to save into
    """
    for url in urls:
        if len(url)>0:
            #print "current process id is ",os.getpid()
            content = urllib.urlopen(url).read()
            if not os.path.exists(dirpath):
                os.makedirs(dirpath)
            open(dirpath+r'/'+url[-26:],'w').write(content)

Very simple: open each link, and if the directory doesn't exist, create it. But there is no mutex lock here, so worker threads can race on the exists/makedirs check and cause small problems (a sketch of the lock idea follows); deduplication would be another way to handle it. Then content is written to a file. It doesn't process the article title at all, which is rather crude.
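
Here is a minimal sketch of that mutex idea, my own illustration rather than the teacher's code; ensure_dir is a hypothetical helper that downloadURL could call instead of doing the check inline.

import os
import threading

_dir_lock = threading.Lock()   # one lock shared by all worker threads

def ensure_dir(dirpath):
    # Only one thread at a time runs the exists/makedirs check, so two
    # threads can no longer both see "missing" and both try to create it.
    with _dir_lock:
        if not os.path.exists(dirpath):
            os.makedirs(dirpath)
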
The next piece of code is the parallel programming part; it covers both multiprocessing and multithreading.

def thread_process_job(n, Thread_or_Process, url_list, job):
    """
    n: number of threads or processes
    Thread_or_Process: the Thread or Process class
    job: the task function to run (the name comes from an earlier countdown demo)
    """
    local_time=time.time()
    threads_or_processes = [Thread_or_Process(target=job,args=(url_list[i],str(n)+Thread_or_Process.__name__)) for i in xrange(n)]
    for t in threads_or_processes:
        t.start()
    for t in threads_or_processes:
        t.join()
    print n,Thread_or_Process.__name__," run job need ",time.time()-local_time
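
The reason one function can drive both threads and processes is that Thread and Process share the same constructor arguments and the same start/join interface. A tiny sketch of my own (not from the course) to show just that:

from threading import Thread
from multiprocessing import Process

def job(name):
    print "hello from", name

if __name__ == "__main__":
    for cls in (Thread, Process):
        w = cls(target=job, args=(cls.__name__,))   # identical call for both
        w.start()
        w.join()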

结缘主程序一齐看吗

if __name__=="__main__":
    t=time.time()

    urls=[]
    for i in xrange(7):
        urls.extend(parseTarget('http://blog.sina.com.cn/s/articlelist_1191258123_0_'+str(i+1)+'.html'))

    url_len = len(urls)

    print "total urls number is ",url_len

    for n in [8,4,2,1]:
        # split urls into n chunks in url_list
        url_list=[]
        url_split_len = url_len//n
        for i in xrange(n):
            if i==n-1:
                url_list.append(urls[i*url_split_len:url_len])
            else:
                url_list.append(urls[i*url_split_len:(i+1)*url_split_len])
        # create the threads/processes after splitting the task
        thread_process_job(n,Thread, url_list, downloadURL)
        thread_process_job(n,Process, url_list, downloadURL)

    print "All done in ",time.time()-t

The code above was all written by the teacher. time is imported to see which worker count (8, 4, or 2) is most efficient under multithreading and multiprocessing; the final test result was that 4 processes were the most efficient and fastest, although with much heavier concurrency I'm not sure that would still hold.
That's it for today; next I'll continue with Parallel Programming (2) and (3). As a beginner, learning to program really takes patience, and nobody is teaching me, so when something breaks I can only google it myself.
Let's encourage each other; may you and I both get the future we want.
