基于新浪 SAE、用 Python 开发的微信公众平台,实现以下功能:
输入段子—回复笑话
输入开源+文章—发送消息到开源中国
输入快递+订单号—查询快递信息
输入天气—查询南京最近五天天气状况
输入微博热点—回复微博当前热门话题
输入电影+名称—回复百度云盘中搜索的链接
具体实现代码:
# -*- coding: utf-8 -*-
import hashlib
import web
import lxml
import time
import os
import urllib2,json
import urllib
import re
import random
import hashlib
import cookielib
from urllib import urlencode
from lxml import etree
class weixininterface:
def __init__(self):
    """Set up the web.py template renderer rooted at this module's directory.

    Templates are looked up in a ``templates`` folder next to this file.
    """
    # Directory containing this module on disk.
    self.app_root = os.path.dirname(__file__)
    # NOTE(review): the original used typographic quotes around
    # 'templates' (a SyntaxError); replaced with plain ASCII quotes.
    self.templates_root = os.path.join(self.app_root, 'templates')
    self.render = web.template.render(self.templates_root)
def get(self):
    """Handle the WeChat server's one-time URL-verification handshake.

    WeChat calls this endpoint with query parameters ``signature``,
    ``timestamp``, ``nonce`` and ``echostr``.  Per the WeChat spec we
    sort [token, timestamp, nonce] lexicographically, SHA-1 the
    concatenation, and echo ``echostr`` back only when the digest
    matches ``signature`` (i.e. the request really came from WeChat).

    Returns:
        The ``echostr`` value on a valid signature; ``None`` otherwise.
    """
    # Query parameters sent by the WeChat verification request.
    data = web.input()
    signature = data.signature
    timestamp = data.timestamp
    nonce = data.nonce
    echostr = data.echostr
    # Token configured in the WeChat admin console; must match exactly.
    # (Original used typographic quotes here — a SyntaxError — fixed.)
    token = "weixin9047"
    # Dictionary-order sort as required by the verification algorithm.
    # Renamed from `list`, which shadowed the builtin; the original
    # `map(sha1.update, ...)` is also a lazy no-op on Python 3, so we
    # hash the joined string in one call and encode it to bytes, which
    # works on both Python 2 and 3.
    params = sorted([token, timestamp, nonce])
    hashcode = hashlib.sha1("".join(params).encode("utf-8")).hexdigest()
    # Only a genuine WeChat request produces a matching signature.
    if hashcode == signature:
        return echostr
def post(self):
str_xml = web.data() #获得post来的数据
xml = etree.fromstring(str_xml)#进行xml解析
content=xml.find(“content”).text#获得用户所输入的内容
msgtype=xml.find(“msgtype”).text
fromuser=xml.find(“fromusername”).text
touser=xml.find(“tousername”).text
if(content == u”天气”):
url = “http://m.ip138.com/21/nanjing/tianqi/”
headers = {
‘connection’: ‘keep-alive’,
‘accept’: ‘text/html, application/xhtml+xml, */*’,
‘accept-language’: ‘en-us,en;q=0.8,zh-hans-cn;q=0.5,zh-hans;q=0.3’,
‘user-agent’: ‘mozilla/5.0 (windows nt 6.3; wow64; trident/7.0; rv:11.0) like gecko’}
req = urllib2.request(url, headers = headers)
opener = urllib2.urlopen(req)
html = opener.read()
rex = r'(?)’
rexx = r'(?).{5,15}(?=
)’
n = re.findall(rex,html)
m = re.findall(rexx,html)
str_wether = “”
for (i,j) in zip(m,n):
str_wether = str_wether + j + ” ” +i + “\n”
return self.render.reply_text(fromuser,touser,int(time.time()),”最近五天天气:\n”+str_wether)
elif(content[0:2] == u”电影”):
keyword = urllib.quote(content[2:].encode(“utf-8”))
url = “http://www.wangpansou.cn/s.php?q=”+keyword
headers = {
‘connection’: ‘keep-alive’,
‘accept’: ‘text/html, application/xhtml+xml, */*’,
‘accept-language’: ‘en-us,en;q=0.8,zh-hans-cn;q=0.5,zh-hans;q=0.3’,
‘user-agent’: ‘mozilla/5.0 (windows nt 6.3; wow64; trident/7.0; rv:11.0) like gecko’}
req = urllib2.request(url, headers = headers)
opener = urllib2.urlopen(req)
html = opener.read()
rex = r’https?://pan.baidu.com.*\?uk=[0-9]{10}.*[\d+?]”‘
m = re.findall(rex,html)
string = u””
for i in m:
string = string + i + “\n”
return self.render.reply_text(fromuser,touser,int(time.time()),u”以下是电影链接:\n”+string)
elif(u”段子” in content):
url_8 = “http://www.qiushibaike.com/”
url_24 = “http://www.qiushibaike.com/hot/”
headers = {
‘connection’: ‘keep-alive’,
‘accept’: ‘text/html, application/xhtml+xml, */*’,
‘accept-language’: ‘en-us,en;q=0.8,zh-hans-cn;q=0.5,zh-hans;q=0.3’,
‘user-agent’: ‘mozilla/5.0 (windows nt 6.3; wow64; trident/7.0; rv:11.0) like gecko’}
req_8 = urllib2.request(url_8, headers = headers)
req_24 = urllib2.request(url_24,headers = headers)
opener_8 = urllib2.urlopen(req_8)
opener_24 = urllib2.urlopen(req_24)
html_8 = opener_8.read()
html_24 = opener_24.read()
rex = r'(?).*?(?=