本文实例讲述了python实现的文件同步服务器。分享给大家供大家参考。具体实现方法如下:
服务端使用asyncore, 收到文件后保存到本地。
客户端使用pyinotify监视目录的变化,把变动的文件发送到服务端。
重点:
1. 使用struct模块打包发送文件的信息,服务端收到后,根据文件信息来接收客户端传送过来的文件。
2. 客户端使用多线程,pyinotify监视到文件变化,放到队列中,由另外一个线程发送。
上代码:
服务端:
# receive file from client and store them into file use asyncore.#
#!/usr/bin/python
#coding: utf-8
import asyncore
import socket
from socket import errno
import logging
import time
import sys
import struct
import os
import fcntl
import threading
from rrd_graph import makegraph
# Verify the optional python-rrdtool binding is importable before the server
# starts; without it RRD files cannot be produced, so fail fast with a hint.
try:
    import rrdtool
except ImportError:
    print("hope this information can help you:")
    print("can not find rrdtool module in sys path, just run [apt-get install python-rrdtool] in ubuntu.")
    sys.exit(1)
class requesthandler(asyncore.dispatcher):
    """Per-connection asyncore handler: receives files from a client and
    stores them locally (see handle_read for the wire format)."""

    def __init__(self, sock, map=None, chunk_size=1024):
        # Logger name embeds the socket's local address so that log lines
        # from concurrent connections can be told apart.
        self.logger = logging.getLogger('%s-%s' % (self.__class__.__name__, str(sock.getsockname())))
        # Maximum number of bytes moved per send()/recv() call.
        self.chunk_size = chunk_size
        asyncore.dispatcher.__init__(self, sock, map)
        # Outgoing buffer: chunks queued for handle_write().
        self.data_to_write = list()
def readable(self):
    """asyncore hook: always interested in read events.

    Was `return true` (undefined name) in the published code.
    """
    return True
def writable(self):
    """asyncore hook: report interest in write events.

    Interested while the connection is still being established, or
    whenever outgoing chunks are queued in data_to_write.
    """
    still_connecting = not self.connected
    return still_connecting or len(self.data_to_write)
def handle_write(self):
    """Flush one queued chunk; re-queue whatever the socket refused.

    Pops the most recently queued buffer, sends at most chunk_size bytes
    of it, and pushes the unsent tail back so the next writable event
    continues where this one stopped.
    """
    buf = self.data_to_write.pop()
    sent = self.send(buf[:self.chunk_size])
    if sent < len(buf):
        # Short write: keep the remainder for the next round.
        self.data_to_write.append(buf[sent:])
def handle_read(self):
# Receive one file from the client.  Wire format: a fixed-size header packed
# with struct format "!ll128s128sl" (path length, name length, 128-byte padded
# path, 128-byte padded name, file size), followed by the raw file body.
# NOTE(review): reproduced byte-for-byte from the published article, which
# stripped indentation, lower-cased names (oserror, ewouldblock) and mangled
# quotes; only comments were added/translated here.
self.writen_size = 0
# Destination root under which received perfdata files are written.
nagios_perfdata = '../perfdata'
head_packet_format = "!ll128s128sl"
head_packet_size = struct.calcsize(head_packet_format)
data = self.recv(head_packet_size)
if not data:
# Peer closed the connection before sending a header.
return
filepath_len, filename_len, filepath,filename, filesize = struct.unpack(head_packet_format,data)
# The 128-byte string fields are padded; the explicit lengths trim them.
filepath = os.path.join(nagios_perfdata, filepath[:filepath_len])
filename = filename[:filename_len]
self.logger.debug("update file: %s" % filepath + '/' + filename)
try:
if not os.path.exists(filepath):
os.makedirs(filepath)
# NOTE(review): `oserror` should be OSError — kept as published.
except oserror:
pass
self.fd = open(os.path.join(filepath,filename), 'w')
#self.fd = open(filename,'w')
if filesize > self.chunk_size:
# Python 2 integer division: number of whole chunk_size reads expected.
times = filesize / self.chunk_size
first_part_size = times * self.chunk_size
second_part_size = filesize % self.chunk_size
# First loop: receive the whole-chunk portion of the file.
while 1:
try:
data = self.recv(self.chunk_size)
#self.logger.debug(“handle_read()->%s size.”,len(data))
except socket.error,e:
# Non-blocking socket had no data ready; wait and retry.
if e.args[0] == errno.ewouldblock:
print “ewouldblock”
time.sleep(1)
else:
#self.logger.debug(“error happend while receive data: %s” % e)
break
else:
self.fd.write(data)
self.fd.flush()
self.writen_size += len(data)
if self.writen_size == first_part_size:
break
#receive the packet at last
# Second loop: receive the final partial chunk (filesize % chunk_size bytes).
while 1:
try:
data = self.recv(second_part_size)
#self.logger.debug(“handle_read()->%s size.”,len(data))
except socket.error,e:
if e.args[0] == errno.ewouldblock:
print “ewouldblock”
time.sleep(1)
else:
#self.logger.debug(“error happend while receive data: %s” % e)
break
else:
self.fd.write(data)
self.fd.flush()
self.writen_size += len(data)
if len(data) == second_part_size:
break
elif filesize 0:
for i in range(died_threads):
print “start %s thread…” % name
t = obj[0].__class__(*parameters)
t.start()
self.add_to_pool_list(t,name)
else:
break
time.sleep(0.5)
def cal_died_thread(self, pool_list, name):
    """Count and unregister dead threads in the pool named *name*.

    Scans self.pool_list (the *pool_list* parameter is kept for
    backward compatibility but, as in the original, is not consulted),
    removes every thread that is no longer alive and returns how many
    were removed so the caller can restart the same number.

    Fixes vs. published code: `isalive()` does not exist
    (`Thread.is_alive()`), and the list was mutated while being
    iterated; we iterate a snapshot instead.
    """
    died = 0
    for entry in self.pool_list:
        for pool_name, threads in entry.items():
            if pool_name != name:
                continue
            # Iterate a copy: remove_from_pool_list mutates `threads`.
            for t in list(threads):
                if not t.is_alive():
                    self.remove_from_pool_list(t)
                    died += 1
    return died
def add_to_pool_list(self, obj, name):
    """Register *obj* (a worker thread) under every pool whose key is *name*."""
    for entry in self.pool_list:
        for pool_name, threads in entry.items():
            if pool_name == name:
                threads.append(obj)
def remove_from_pool_list(self, obj):
    """Remove *obj* from whichever pool currently holds it (first hit wins).

    Fix vs. published code: the bare `except: pass` silently swallowed
    every error; only ValueError (obj not in this list) is expected, so
    only that is caught.
    """
    for entry in self.pool_list:
        for threads in entry.values():
            try:
                threads.remove(obj)
            except ValueError:
                # Not in this pool; keep searching.
                continue
            else:
                # Removed once; stop, matching the original early return.
                return
使用方法:
# Usage example: start two worker pools feeding off one queue, then a monitor
# thread that keeps them alive.
# Fixes vs. published code: `queue.queue()` does not exist (the class is
# `queue.Queue`), and the instance was named `monitor`, shadowing the class.
rrds_queue = queue.Queue()

make_rrds_pool = []
for i in range(5):
    make_rrds_pool.append(makerrds(rrds_queue))
for worker in make_rrds_pool:
    worker.start()

make_graph_pool = []
for i in range(5):
    make_graph_pool.append(makegraph(rrds_queue))
for worker in make_graph_pool:
    worker.start()

# Keyword args: name -> (thread pool, constructor args...) per the monitor API.
monitor_thread = monitor(make_rrds_pool=(make_rrds_pool, rrds_queue),
                         make_graph_pool=(make_graph_pool, rrds_queue))
monitor_thread.start()
解析:
1. 接受字典参数,value为一个元组,第一个元素是线程池,后面的都是参数。
2. 每0.5秒监视线程池中的线程数量,如果线程死掉了,记录死掉线程的数目,再启动同样数量的线程。
3. 如果没有线程死去,则什么也不做。
从外部调用django模块
import os
import sys

# Make the django project importable before touching its settings.
# Fix vs. published code: the path string used smart quotes (syntax error).
sys.path.insert(0, '/data/cloud_manage')

from django.core.management import setup_environ
import settings

# Bind this process to the project's settings so ORM/model imports work
# outside of manage.py (setup_environ is the pre-1.4 django API).
setup_environ(settings)

from common.monitor import monitor
from django.db import connection, transaction
前提就是,要新建一个django的project,这里我们新建了一个cloud_manage.
这样不仅可以调用django自身的模块,还能调用project本身的东西。
希望本文所述对大家的python程序设计有所帮助。