一、Python Web Crawler
---------------------------------------------------
    1.Test

# -*- encoding=utf-8 -*-

        import urllib.request

        # open the resource at the url
        resp = urllib.request.urlopen("http://focus.tianya.cn/")
        # read the content; returns bytes
        mybytes = resp.read()
        # decode the bytes into a string
        mystr = mybytes.decode("utf-8")
        # close the resource
        resp.close()
        # print the page
        print(mystr)

        # import the regular-expression module
        import re

        # non-greedy capture of the href value
        ptn = u'<a\s*href="([^"]*?)"'
        res = re.finditer(ptn, mystr)
        for r in res:
            addr = r.group(1)
            print(addr)
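
        A quick way to sanity-check the href pattern without hitting the network
        (my addition, not part of the original notes) is to run it over an inline
        HTML snippet; the slightly broader variant below also matches anchors that
        carry other attributes before href:

        import re

        html = '<a href="http://focus.tianya.cn/"> <a class="nav" href="/about.html">'
        for m in re.finditer(u'<a[^>]*?href="([^"]*?)"', html):
            print(m.group(1))        # prints both href values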



    2.Crawl recursively and save the pages

# -*- encoding=utf-8 -*-

        import urllib.request
        import os
        import re

        def fileExists(url, localpath):
            path = url
            path = path.replace(":", "_")
            path = path.replace("/", "$")
            path = path.replace("?", "$")
            path = localpath + "/" + path
            return os.path.exists(path)

        # download a web page
        def download(url):
            # turn the url into a safe local file name
            path = url
            path = path.replace(":", "_")
            path = path.replace("/", "$")
            path = path.replace("?", "$")
            path = "d:/py/data/" + path

            # fetch the page content
            resp = urllib.request.urlopen(url)
            pageBytes = resp.read()
            resp.close()

            # save the page to disk if it has not been downloaded yet
            if not os.path.exists(path):
                f = open(path, "wb")
                f.write(pageBytes)
                f.close()

            try:
                # decode the page content
                pageStr = pageBytes.decode("utf-8")
                # extract the href addresses
                pattern = u'<a[^>]*href="([^"]*?)"'
                res = re.finditer(pattern, pageStr)
                for r in res:
                    addr = r.group(1)
                    print(addr)
                    # protocol-relative link: prepend the scheme
                    if addr.startswith("//"):
                        addr = "http:" + addr

                    # follow only http links that have not been downloaded yet
                    if addr.startswith("http://") and not fileExists(addr, "d:/py/data"):
                        download(addr)

            except Exception as e:
                # print(url + " : not text")
                # print(Exception)
                print(e)
                # print(pageBytes.decode("gbk", errors='ignore'))
                return

        download("http://www.jd.com")



二、Writing data to HBase from Python
--------------------------------------------------------------
    1.Start the HBase cluster
        a.Start ZooKeeper

        b.Start the Hadoop cluster

        c.Start the HBase cluster
            If the cluster clocks are out of sync:
            $>su root
            $>xcall.sh "ntpdate asia.pool.ntp.org"

    2.Start the HBase thrift server on s100 so that third-party applications can communicate with it
        $> hbase-daemon.sh start thrift2

    3.Check the web UI
        http://s100:9095/              // web UI port
        $> netstat -anop | grep 9090   // 9090 is the RPC port

    4.Download the Thrift compiler for Windows; it needs no installation, it is just a standalone tool.
        thrift-0.10.0.exe

    5.Download and install the Thrift Python module.
        5.1)Download the file
            thrift-0.10.0.tar.gz

        5.2)Untar the file

        5.3)Enter the directory
            cmd>cd thrift-0.10.0\lib\py
            cmd>setup.py install

            ...
            Using c:\users\administrator\appdata\local\programs\python\python37\lib\site-packages\six-1.11.0-py3.7.egg
            Finished processing dependencies for thrift==0.10.0

    6.Test that the module can be imported in a .py file
        from thrift import Thrift
        from thrift.transport import TSocket
        from thrift.transport import TTransport
        from thrift.protocol import TBinaryProtocol
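
        Optionally (my addition, not in the original steps), the install and the thrift2
        server can be smoke-tested with a bare transport open before generating the
        HBase bindings; s100:9090 is the host/port used later in these notes:

        from thrift.transport import TSocket
        from thrift.transport import TTransport

        transport = TTransport.TBufferedTransport(TSocket.TSocket('s100', 9090))
        transport.open()                     # raises if the server is unreachable
        print("connected to thrift2 at s100:9090")
        transport.close()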

    7.Locate the hbase.thrift file and compile it to generate the Python files, using the command below
        [hbase.thrift ships with the HBase distribution; if you cannot find it, download it from the web]
        cmd> thrift-0.10.0.exe -o ./out -gen py hbase.thrift

    8.Copy the generated folder into the IDEA Python module
        a.Create a new Python package named mythrift under the module
        b.Copy the hbase folder from the generated py output into mythrift

    9.Use Python to operate on HBase tables
# -*- encoding=utf-8 -*-

        # standard-library imports
        import os

        # thrift Python modules
        from thrift import Thrift
        from thrift.transport import TSocket
        from thrift.transport import TTransport
        from thrift.protocol import TBinaryProtocol

        # the hbase Python modules generated from hbase.thrift
        from mythrift.hbase import THBaseService
        from mythrift.hbase.ttypes import *
        from mythrift.hbase.ttypes import TResult

        # create a socket connection to s100:9090
        transport = TSocket.TSocket('s100', 9090)
        transport = TTransport.TBufferedTransport(transport)
        protocol = TBinaryProtocol.TBinaryProtocol(transport)
        client = THBaseService.Client(protocol)

        # open the transport
        transport.open()

        # # put operation
        # table = b'ns1:t1'
        # row = b'row2'
        # v1 = TColumnValue(b'f1', b'id', b'101')
        # v2 = TColumnValue(b'f1', b'name', b'tomas')
        # v3 = TColumnValue(b'f1', b'age', b'12')
        # vals = [v1, v2, v3]
        # put = TPut(row, vals)
        # client.put(table, put)
        # print("okkkk!!")
        # transport.close()

        # # get operation
        # table = b'ns1:t1'
        # rowkey=b"row2"
        # col_id = TColumn(b"f1",b"id")
        # col_name = TColumn(b"f1",b"name")
        # col_age = TColumn(b"f1",b"age")
        #
        # cols = [col_id,col_name,col_age]
        # get = TGet(rowkey,cols)
        # res = client.get(table,get)
        # print(res.columnValues)
        # print(bytes.decode(res.columnValues[0].qualifier))
        # print(bytes.decode(res.columnValues[0].family))
        # print(res.columnValues[0].timestamp)
        # print(bytes.decode(res.columnValues[0].value))

        # # delete operation
        # table = b'ns1:t1'
        # rowkey = b"row2"
        # col_id = TColumn(b"f1", b"id")
        # col_name = TColumn(b"f1", b"name")
        # col_age = TColumn(b"f1", b"age")
        # cols = [col_id, col_name, col_age]
        #
        # # build the delete object
        # delete = TDelete(rowkey,cols)
        # res = client.deleteSingle(table, delete)
        # transport.close()
        # print("ok")

        # range scan between a start and a stop rowkey
        table = b'call:calllogs'
        startRow = b'34,13520401111,20180114152647,0,13269364444,406'
        stopRow = b'90,15032295555,20180922165903,0,15778421111,298'
        dur = TColumn(b"f1", b"callDuration")
        time = TColumn(b"f1", b"callTime")
        caller = TColumn(b"f1", b"caller")
        callee = TColumn(b"f1", b"callee")
        cols = [dur, time,caller,callee]

        scan = TScan(startRow=startRow,stopRow=stopRow,columns=cols)
        r = client.getScannerResults(table, scan, 100)
        for x in r:
            print("============")
            print(bytes.decode(x.columnValues[0].qualifier))
            print(bytes.decode(x.columnValues[0].family))
            print(x.columnValues[0].timestamp)
            print(bytes.decode(x.columnValues[0].value))

        # full-table scan
        table = b'call:calllogs'
        # startRow = b'34,13520401111,20180114152647,0,13269364444,406'
        # stopRow = b'90,15032295555,20180922165903,0,15778421111,298'
        dur = TColumn(b"f1", b"callDuration")
        time = TColumn(b"f1", b"callTime")
        caller = TColumn(b"f1", b"caller")
        callee = TColumn(b"f1", b"callee")
        cols = [dur, time, caller, callee]

        scan = TScan(columns=cols)
        r = client.getScannerResults(table, scan, 100)
        print(len(r))
        for x in r:
            print("============")
            print(bytes.decode(x.columnValues[0].qualifier))
            print(bytes.decode(x.columnValues[0].family))
            print(x.columnValues[0].timestamp)
            print(bytes.decode(x.columnValues[0].value))

        # close the transport when finished
        transport.close()
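
        One possible refinement (my addition, not in the original notes): wrap the
        connection handling in a small helper so the transport is always closed, even
        when a call raises:

        from thrift.transport import TSocket
        from thrift.transport import TTransport
        from thrift.protocol import TBinaryProtocol
        from mythrift.hbase import THBaseService
        from mythrift.hbase.ttypes import TScan

        def with_hbase_client(host, port, action):
            # build the transport/protocol/client, run the callback, always close
            transport = TTransport.TBufferedTransport(TSocket.TSocket(host, port))
            protocol = TBinaryProtocol.TBinaryProtocol(transport)
            client = THBaseService.Client(protocol)
            transport.open()
            try:
                return action(client)
            finally:
                transport.close()

        # example: fetch up to 100 rows of the full-table scan shown above
        rows = with_hbase_client('s100', 9090,
                                 lambda c: c.getScannerResults(b'call:calllogs', TScan(), 100))
        print(len(rows))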



三、WordCount in the Spark shell with Python
-----------------------------------------------------------------
    1.Local mode [can be written in Scala or Python]
        a.Remove the core-site.xml | hdfs-site.xml | hive-site.xml files from spark/conf
            [so Spark will not integrate with Hive]

        b.Enter the pyspark shell
            $> cd /soft/spark/bin
            $> ./pyspark --master local[*]
            >>> arr = [1,2,3,4]
            >>> rdd = sc.parallelize(arr)
            >>> rdd.map(lambda e : (e,1))       # a Python lambda expression

    2.WordCount demo
        >>> arr = ["tom","tom1","tom1","tom3"]
        >>> rdd1 = sc.parallelize(arr)
        >>> rdd1.collect()
        ['tom', 'tom1', 'tom1', 'tom3']
        >>> rdd1.map(lambda e : (e,1))
        PythonRDD[4] at RDD at PythonRDD.scala:48
        >>> rdd2 = rdd1.map(lambda e : (e,1))
        >>> rdd2.collect()
        [('tom', 1), ('tom1', 1), ('tom1', 1), ('tom3', 1)]
        >>> rdd3 = rdd2.reduceByKey()
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
        TypeError: reduceByKey() takes at least 2 arguments (1 given)
        >>> rdd3 = rdd2.reduceByKey(lambda a,b : a + b)
        >>> rdd3.collect()
        [('tom1', 2), ('tom3', 1), ('tom', 1)]
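
        The demo above counts list elements; a word count over a text file only adds a
        flatMap that splits each line into words. A minimal sketch (the input path is a
        placeholder):

        lines = sc.textFile("file:///tmp/words.txt")            # placeholder path
        counts = (lines.flatMap(lambda line: line.split())
                       .map(lambda w: (w, 1))
                       .reduceByKey(lambda a, b: a + b))
        print(counts.collect())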


四、Modifying the crawler -- store the crawled pages in HBase
---------------------------------------------------------
    1.Encode and decode with base64
        import base64

        url = b"http://tianya.cn"
        b = base64.encodebytes(url)
        print(b)
        bb = base64.decodebytes(b)
        print(bb)
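
        One caveat worth noting (my addition): base64.encodebytes breaks its output into
        76-character lines and appends a trailing newline, and those newline bytes become
        part of the rowkey used below. base64.b64encode / b64decode produce the same
        encoding without newlines:

        import base64

        url = b"http://tianya.cn"
        print(base64.encodebytes(url))   # b'aHR0cDovL3RpYW55YS5jbg==\n'  -- trailing newline
        print(base64.b64encode(url))     # b'aHR0cDovL3RpYW55YS5jbg=='    -- no newline
        print(base64.b64decode(base64.b64encode(url)))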

    2.Create the HBase table: pages
        hbase> create 'ns1:pages','f1'

    3.Write pageDao.py to handle the CRUD against the HBase table

# -*- encoding=utf-8 -*-

        # standard-library imports
        import os
        import base64

        # thrift Python modules
        from thrift import Thrift
        from thrift.transport import TSocket
        from thrift.transport import TTransport
        from thrift.protocol import TBinaryProtocol

        # the hbase Python modules generated from hbase.thrift
        from mythrift.hbase import THBaseService
        from mythrift.hbase.ttypes import *
        from mythrift.hbase.ttypes import TResult

        # create a socket connection to s100:9090
        transport = TSocket.TSocket('s100', 9090)
        transport = TTransport.TBufferedTransport(transport)
        protocol = TBinaryProtocol.TBinaryProtocol(transport)
        client = THBaseService.Client(protocol)

        # save a page to HBase
        def savePage(url, page):
            transport.open()
            # base64-encode the url into bytes and use it as the rowkey
            urlBase64Bytes = base64.encodebytes(url.encode("utf-8"))

            # put operation
            table = b'ns1:pages'
            rowkey = urlBase64Bytes
            v1 = TColumnValue(b'f1', b'page', page)
            vals = [v1]
            put = TPut(rowkey, vals)
            client.put(table, put)
            transport.close()

        # check whether a page already exists in HBase
        def exists(url):
            transport.open()
            # base64-encode the url into bytes and use it as the rowkey
            urlBase64Bytes = base64.encodebytes(url.encode("utf-8"))
            print(urlBase64Bytes)

            table = b'ns1:pages'
            rowkey = urlBase64Bytes
            col_page = TColumn(b"f1",b"page")

            cols = [col_page]
            get = TGet(rowkey,cols)
            res = client.get(table, get)
            transport.close()
            return res.row is not None
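
        A quick interactive check of the DAO (my addition; it assumes the code above is
        saved as pageDao.py and the thrift2 server is running):

        import pageDao

        pageDao.savePage("http://tianya.cn", b"<html>demo page</html>")
        print(pageDao.exists("http://tianya.cn"))     # True once the row is stored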



    4.Write the crawler

 # -*- encoding=utf-8 -*-

        import urllib.request
        import os
        import re
        import pageDao

        # download a web page
        def download(url):
            # fetch the page content
            resp = urllib.request.urlopen(url)
            pageBytes = resp.read()
            resp.close()

            # store the page in HBase if it is not there yet
            if not pageDao.exists(url):
                pageDao.savePage(url, pageBytes)

            try:
                # decode the page content
                pageStr = pageBytes.decode("utf-8")
                # extract the href addresses
                pattern = u'<a[^>]*href="([^"]*?)"'
                res = re.finditer(pattern, pageStr)
                for r in res:
                    addr = r.group(1)
                    print(addr)
                    # protocol-relative link: prepend the scheme
                    if addr.startswith("//"):
                        addr = "http:" + addr

                    # follow only http links that are not stored yet and are not this page itself
                    if addr.startswith("http://") and url != addr and not pageDao.exists(addr):
                        download(addr)

            except Exception as e:
                print(e)
                print(pageBytes.decode("gbk", errors='ignore'))
                return

        download("http://jd.com")




五、Data analysis with Spark in Python, generating charts
-----------------------------------------------------------------
    1.Install the Python modules with pip on Windows
        1.numpy
            cmd> pip install -i https://pypi.tuna.tsinghua.edu.cn/simple numpy

        2.scipy
            cmd> pip install -i https://pypi.tuna.tsinghua.edu.cn/simple scipy

        3.matplotlib
            cmd> pip install -i https://pypi.tuna.tsinghua.edu.cn/simple matplotlib
            cmd> python -m pip install -U pip setuptools
            cmd> python -m pip install matplotlib

    2.Install the numpy, scipy, pandas and matplotlib modules on Ubuntu
        a.numpy
            Installation:
            If Python is not installed on your Ubuntu system, install Python first.
            Enter the following commands in a terminal:
            $> sudo apt-get update
            $> sudo apt-get install python-numpy
            $> sudo apt-get install python3-numpy
            For Python 3, simply replace python-numpy above with python3-numpy.

        b.scipy
            $> sudo apt-get update
            $> sudo apt-get install python-scipy

        c.pandas
            $> sudo apt-get update
            $> sudo apt-get install python-pandas

        d.matplotlib
            $> sudo apt-get update
            $> sudo apt-get install python-matplotlib

        e.scikit-learn
            $> sudo apt-get update
            $> sudo apt-get install python-sklearn

    3.Enter the Python Spark shell
        cmd> ./pyspark  --master local[*]
        $> ./pyspark --master local[*]

    4.Paste the code below to create a DataFrame
        from pyspark.sql import Row
        import matplotlib.pyplot as plt
        import numpy as np
        import pylab as P
        plt.rcdefaults()
        dataDir ="file:///D://share//python//ml-data//ml-1m//users.dat"
        dataDir ="file:///mnt/hgfs/share/python/ml-data/ml-1m/users.dat"
        lines = sc.textFile(dataDir)
        splitLines = lines.map(lambda l: l.split("::"))
        usersRDD = splitLines.map(lambda p: Row(id=p[0],gender=p[1],age=int(p[2]), occupation=p[3], zipcode=p[4]))
        usersDF = spark.createDataFrame(usersRDD)
        usersDF.createOrReplaceTempView("users")
        usersDF.show()
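
        As a quick sanity check (my addition), the registered view can already be
        queried with Spark SQL before building the charts in step 5:

        spark.sql("SELECT gender, count(*) AS cnt FROM users GROUP BY gender").show()
        spark.sql("SELECT min(age), max(age) FROM users").show()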

    5.Generate the charts

        # histogram
        ageDF = spark.sql("SELECT age FROM users")
        ageList = ageDF.rdd.map(lambda p: p.age).collect()
        ageDF.describe().show()
        plt.hist(ageList)
        plt.title("Age distribution of the users\n")
        plt.xlabel("Age")
        plt.ylabel("Number of users")
        plt.show(block=False)

        # density plot
        from scipy.stats import gaussian_kde
        density = gaussian_kde(ageList)
        xAxisValues = np.linspace(0,100,1000)
        density.covariance_factor = lambda : .5
        density._compute_covariance()
        plt.title("Age density plot of the users\n")
        plt.xlabel("Age")
        plt.ylabel("Density")
        plt.plot(xAxisValues, density(xAxisValues))
        plt.show(block=False)

        # nested subplots
        plt.subplot(121)
        plt.hist(ageList)
        plt.title("Age distribution of the users\n")
        plt.xlabel("Age")
        plt.ylabel("Number of users")
        plt.subplot(122)
        plt.title("Summary of distribution\n")
        plt.xlabel("Age")
        plt.boxplot(ageList, vert=False)
        plt.show(block=False)

        # bar chart
        occ10 = spark.sql("SELECT occupation, count(occupation) as usercount FROM users GROUP BY occupation ORDER BY usercount DESC LIMIT 10")
        occ10.show()

        occTuple = occ10.rdd.map(lambda p:(p.occupation,p.usercount)).collect()
        occList, countList = zip(*occTuple)
        occList

        y_pos = np.arange(len(occList))
        plt.barh(y_pos, countList, align='center', alpha=0.4)
        plt.yticks(y_pos, occList)
        plt.xlabel('Number of users')
        plt.title('Top 10 user types\n')
        plt.gcf().subplots_adjust(left=0.15)
        plt.show(block=False)


        # stacked bar chart
        occGender = spark.sql("SELECT occupation, gender FROM users")
        occGender.show()

        occCrossTab = occGender.stat.crosstab("occupation","gender")
        occupationsCrossTuple = occCrossTab.rdd.map(lambda p:(p.occupation_gender,p.M, p.F)).collect()
        occList, mList, fList = zip(*occupationsCrossTuple)
        N = len(occList)
        ind = np.arange(N)
        width = 0.75
        p1 = plt.bar(ind, mList, width, color='r')
        p2 = plt.bar(ind, fList, width, color='y', bottom=mList)
        plt.ylabel('Count')
        plt.title('Gender distribution by occupation\n')
        plt.xticks(ind + width/2., occList, rotation=90)
        plt.legend((p1[0], p2[0]), ('Male', 'Female'))
        plt.gcf().subplots_adjust(bottom=0.25)
        plt.show(block=False)

        # pie chart
        occupationsBottom10 = spark.sql("SELECT occupation,count(occupation) as usercount FROM users GROUP BY occupation ORDER BY usercount LIMIT 10")
        occupationsBottom10Tuple = occupationsBottom10.rdd.map(lambda p:(p.occupation,p.usercount)).collect()
        occupationsBottom10List, countBottom10List =zip(*occupationsBottom10Tuple)
        explode = (0, 0.3, 0.2, 0.15,0.1,0,0,0,0,0.1)
        plt.pie(countBottom10List, explode=explode,labels=occupationsBottom10List, autopct='%1.1f%%', shadow=True,startangle=90)
        plt.title('Bottom 10 user types\n')
        plt.show(block=False)
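
        If pyspark is running on a machine without a display (my addition, not part of
        the original notes), matplotlib's non-interactive Agg backend plus savefig
        writes the charts to image files instead of opening windows:

        import matplotlib
        matplotlib.use("Agg")            # select a non-interactive backend before pyplot is used
        import matplotlib.pyplot as plt

        plt.hist(ageList)                # ageList as computed in the histogram example above
        plt.title("Age distribution of the users\n")
        plt.savefig("age_hist.png")      # write the chart to a file
        plt.close()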

 
