Merge branch 'master' into 0.2-dev

Xing Xing 2014-01-09 16:18:52 +08:00
commit e701be9288
5 changed files with 86 additions and 47 deletions

View File

@@ -6,6 +6,7 @@ import (
     "io"
     "net"
     "sync"
+    "bufio"
 )
 
 // One client connect to one server.
@@ -19,6 +20,7 @@ type Client struct {
     in     chan *Response
     isConn bool
     conn   net.Conn
+    rw     *bufio.ReadWriter
 
     ErrorHandler ErrorHandler
 }
@@ -36,6 +38,8 @@ func New(network, addr string) (client *Client, err error) {
     if err != nil {
         return
     }
+    client.rw = bufio.NewReadWriter(bufio.NewReader(client.conn),
+        bufio.NewWriter(client.conn))
     client.isConn = true
     go client.readLoop()
     go client.processLoop()
@@ -46,12 +50,12 @@ func (client *Client) write(req *request) (err error) {
     var n int
     buf := req.Encode()
     for i := 0; i < len(buf); i += n {
-        n, err = client.conn.Write(buf[i:])
+        n, err = client.rw.Write(buf[i:])
         if err != nil {
             return
         }
     }
-    return
+    return client.rw.Flush()
 }
 
 func (client *Client) read(length int) (data []byte, err error) {
@@ -59,7 +63,7 @@ func (client *Client) read(length int) (data []byte, err error) {
     buf := getBuffer(bufferSize)
     // read until data can be unpacked
     for i := length; i > 0 || len(data) < minPacketLength; i -= n {
-        if n, err = client.conn.Read(buf); err != nil {
+        if n, err = client.rw.Read(buf); err != nil {
            if err == io.EOF {
                err = ErrLostConn
            }
@@ -78,6 +82,7 @@ func (client *Client) readLoop() {
     var data, leftdata []byte
     var err error
     var resp *Response
+ReadLoop:
     for {
         if data, err = client.read(bufferSize); err != nil {
             client.err(err)
@@ -93,24 +98,30 @@ func (client *Client) readLoop() {
                 client.err(err)
                 break
             }
+            client.rw = bufio.NewReadWriter(bufio.NewReader(client.conn),
+                bufio.NewWriter(client.conn))
             continue
         }
         if len(leftdata) > 0 { // some data left for processing
             data = append(leftdata, data...)
         }
+        for {
             l := len(data)
             if l < minPacketLength { // not enough data
                 leftdata = data
-                continue
+                continue ReadLoop
             }
             if resp, l, err = decodeResponse(data); err != nil {
-                client.err(err)
+                leftdata = data[l:]
+                continue ReadLoop
+            } else {
+                client.in <- resp
+            }
+            data = data[l:]
+            if len(data) > 0 {
                 continue
             }
-            client.in <- resp
-            leftdata = nil
-            if len(data) > l {
-                leftdata = data[l:]
+            break
         }
     }
 }
@@ -131,9 +142,13 @@ func (client *Client) processLoop() {
             resp = client.handleInner("c", resp)
         case dtEchoRes:
             resp = client.handleInner("e", resp)
-        case dtWorkData, dtWorkWarning, dtWorkStatus, dtWorkComplete,
-            dtWorkFail, dtWorkException:
+        case dtWorkData, dtWorkWarning, dtWorkStatus:
             resp = client.handleResponse(resp.Handle, resp)
+        case dtWorkComplete, dtWorkFail, dtWorkException:
+            resp = client.handleResponse(resp.Handle, resp)
+            if resp != nil {
+                delete(client.respHandler, resp.Handle)
+            }
         }
     }
 }
@@ -147,7 +162,6 @@ func (client *Client) err(e error) {
 func (client *Client) handleResponse(key string, resp *Response) *Response {
     if h, ok := client.respHandler[key]; ok {
         h(resp)
-        delete(client.respHandler, key)
         return nil
     }
     return resp
@@ -227,7 +241,7 @@ func (client *Client) Status(handle string) (status *Status, err error) {
     client.lastcall = "s" + handle
     client.innerHandler["s"+handle] = func(resp *Response) {
         var err error
-        status, err = resp.Status()
+        status, err = resp._status()
         if err != nil {
             client.err(err)
         }
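
Note: the hunks above switch the client from writing to the raw net.Conn to a bufio.ReadWriter that is flushed once a whole request has been written. A minimal, self-contained sketch of that pattern (bufferedConn, dialBuffered and writePacket are illustrative names, not identifiers from this repository):

// Illustrative sketch only; these names are not part of gearman-go.
package sketch

import (
    "bufio"
    "net"
)

type bufferedConn struct {
    conn net.Conn
    rw   *bufio.ReadWriter
}

func dialBuffered(network, addr string) (*bufferedConn, error) {
    conn, err := net.Dial(network, addr)
    if err != nil {
        return nil, err
    }
    // Reads and writes go through the buffer; the raw conn is kept for Close.
    rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
    return &bufferedConn{conn: conn, rw: rw}, nil
}

// writePacket mirrors the write-then-Flush pairing in the diff: buffered
// writes only reach the socket once the whole packet has been written.
func (b *bufferedConn) writePacket(buf []byte) (err error) {
    var n int
    for i := 0; i < len(buf); i += n {
        if n, err = b.rw.Write(buf[i:]); err != nil {
            return err
        }
    }
    return b.rw.Flush()
}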

View File

@@ -49,19 +49,22 @@ const (
     dtSubmitJobHighBg = 32
     dtSubmitJobLow    = 33
     dtSubmitJobLowBg  = 34
+
+    WorkComplate  = dtWorkComplete
+    WorkDate      = dtWorkData
+    WorkStatus    = dtWorkStatus
+    WorkWarning   = dtWorkWarning
+    WorkFail      = dtWorkFail
+    WorkException = dtWorkException
 )
 
 const (
     // Job type
-    // JOB_NORMAL | JOB_BG means a normal level job run in background
-    // normal level
-    JobNormal = 0
-    // background job
-    JobBg = 1
+    JobNormal = iota
     // low level
-    JobLow = 2
+    JobLow
     // high level
-    JobHigh = 4
+    JobHigh
 )
 
 func getBuffer(l int) (buf []byte) {

View File

@@ -32,13 +32,7 @@ func (resp *Response) Result() (data []byte, err error) {
         err = ErrWorkException
         fallthrough
     case dtWorkComplete:
-        s := bytes.SplitN(resp.Data, []byte{'\x00'}, 2)
-        if len(s) != 2 {
-            err = fmt.Errorf("Invalid data: %V", resp.Data)
-            return
-        }
-        resp.Handle = string(s[0])
-        data = s[1]
+        data = resp.Data
     default:
         err = ErrDataType
     }
@@ -52,26 +46,25 @@ func (resp *Response) Update() (data []byte, err error) {
         err = ErrDataType
         return
     }
-    s := bytes.SplitN(resp.Data, []byte{'\x00'}, 2)
-    if len(s) != 2 {
-        err = ErrInvalidData
-        return
-    }
+    data = resp.Data
     if resp.DataType == dtWorkWarning {
         err = ErrWorkWarning
     }
-    resp.Handle = string(s[0])
-    data = s[1]
     return
 }
 
 // Decode a job from byte slice
 func decodeResponse(data []byte) (resp *Response, l int, err error) {
-    if len(data) < minPacketLength { // valid package should not less 12 bytes
+    a := len(data)
+    if a < minPacketLength { // valid package should not less 12 bytes
         err = fmt.Errorf("Invalid data: %V", data)
         return
     }
     dl := int(binary.BigEndian.Uint32(data[8:12]))
+    if a < minPacketLength + dl {
+        err = fmt.Errorf("Invalid data: %V", data)
+        return
+    }
     dt := data[minPacketLength : dl+minPacketLength]
     if len(dt) != int(dl) { // length not equal
         err = fmt.Errorf("Invalid data: %V", data)
@@ -101,8 +94,31 @@ func decodeResponse(data []byte) (resp *Response, l int, err error) {
     return
 }
 
-// status handler
 func (resp *Response) Status() (status *Status, err error) {
+    data := bytes.SplitN(resp.Data, []byte{'\x00'}, 2)
+    if len(data) != 2 {
+        err = fmt.Errorf("Invalid data: %V", resp.Data)
+        return
+    }
+    status = &Status{}
+    status.Handle = resp.Handle
+    status.Known = true
+    status.Running = true
+    status.Numerator, err = strconv.ParseUint(string(data[0]), 10, 0)
+    if err != nil {
+        err = fmt.Errorf("Invalid Integer: %s", data[0])
+        return
+    }
+    status.Denominator, err = strconv.ParseUint(string(data[1]), 10, 0)
+    if err != nil {
+        err = fmt.Errorf("Invalid Integer: %s", data[1])
+        return
+    }
+    return
+}
+
+// status handler
+func (resp *Response) _status() (status *Status, err error) {
     data := bytes.SplitN(resp.Data, []byte{'\x00'}, 4)
     if len(data) != 4 {
         err = fmt.Errorf("Invalid data: %V", resp.Data)
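
Note: the new Status() above expects a two-field payload (numerator, NUL, denominator), while the renamed _status() keeps the four-field form. A hedged sketch of the same split-and-parse approach in isolation (parseProgress is an illustrative helper, not part of this package):

// Illustrative sketch; parseProgress is not part of gearman-go.
package sketch

import (
    "bytes"
    "fmt"
    "strconv"
)

// parseProgress splits a "numerator\x00denominator" payload, the same way the
// new Status() method parses a work-status response.
func parseProgress(payload []byte) (num, den uint64, err error) {
    fields := bytes.SplitN(payload, []byte{'\x00'}, 2)
    if len(fields) != 2 {
        return 0, 0, fmt.Errorf("invalid status payload: %v", payload)
    }
    if num, err = strconv.ParseUint(string(fields[0]), 10, 0); err != nil {
        return 0, 0, fmt.Errorf("invalid numerator: %s", fields[0])
    }
    if den, err = strconv.ParseUint(string(fields[1]), 10, 0); err != nil {
        return 0, 0, fmt.Errorf("invalid denominator: %s", fields[1])
    }
    return num, den, nil
}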

View File

@@ -5,12 +5,14 @@ import (
     "net"
     "strings"
     "sync"
+    "bufio"
 )
 
 // The agent of job server.
 type agent struct {
     sync.Mutex
     conn      net.Conn
+    rw        *bufio.ReadWriter
     worker    *Worker
     in        chan []byte
     net, addr string
@@ -34,6 +36,8 @@ func (a *agent) Connect() (err error) {
     if err != nil {
         return
     }
+    a.rw = bufio.NewReadWriter(bufio.NewReader(a.conn),
+        bufio.NewWriter(a.conn))
     go a.work()
     return
 }
@@ -63,6 +67,8 @@ func (a *agent) work() {
             a.worker.err(err)
             break
         }
+        a.rw = bufio.NewReadWriter(bufio.NewReader(a.conn),
+            bufio.NewWriter(a.conn))
     }
     if len(leftdata) > 0 { // some data left for processing
         data = append(leftdata, data...)
@@ -125,7 +131,7 @@ func (a *agent) read(length int) (data []byte, err error) {
     buf := getBuffer(bufferSize)
     // read until data can be unpacked
     for i := length; i > 0 || len(data) < minPacketLength; i -= n {
-        if n, err = a.conn.Read(buf); err != nil {
+        if n, err = a.rw.Read(buf); err != nil {
            if isClosed(err) {
                err = ErrLostConn
            }
@@ -144,10 +150,10 @@ func (a *agent) write(outpack *outPack) (err error) {
     var n int
     buf := outpack.Encode()
     for i := 0; i < len(buf); i += n {
-        n, err = a.conn.Write(buf[i:])
+        n, err = a.rw.Write(buf[i:])
         if err != nil {
             return err
         }
     }
-    return
+    return a.rw.Flush()
 }
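
Note: like the client, the agent reads through the buffered reader and keeps any bytes that do not yet form a complete packet as leftdata. A sketch of that length-prefixed framing check, assuming the 12-byte header with the payload length in bytes 8-12 as checked by decodeResponse earlier in this commit (splitPacket is an illustrative name, not part of this package):

// Illustrative sketch of length-prefixed framing; splitPacket is not part of
// gearman-go.
package sketch

import "encoding/binary"

const headerLen = 12 // 4-byte magic + 4-byte type + 4-byte payload length

// splitPacket reports whether buf holds at least one complete packet and, if
// so, returns that packet and the remaining bytes (the "leftdata" case).
func splitPacket(buf []byte) (packet, rest []byte, ok bool) {
    if len(buf) < headerLen {
        return nil, buf, false
    }
    payloadLen := int(binary.BigEndian.Uint32(buf[8:12]))
    if len(buf) < headerLen+payloadLen {
        return nil, buf, false
    }
    return buf[:headerLen+payloadLen], buf[headerLen+payloadLen:], true
}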

View File

@@ -77,7 +77,7 @@ func (inpack *inPack) UpdateStatus(numerator, denominator int) {
     hl := len(inpack.handle)
     nl := len(n)
     dl := len(d)
-    outpack.data = getBuffer(hl + nl + dl + 3)
+    outpack.data = getBuffer(hl + nl + dl + 2)
     copy(outpack.data, []byte(inpack.handle))
     copy(outpack.data[hl+1:], n)
     copy(outpack.data[hl+nl+2:], d)
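
Note: the corrected size follows from the payload layout handle, NUL, numerator, NUL, denominator: two separator bytes, hence hl + nl + dl + 2. A hedged sketch of that packing (packStatus is an illustrative helper, not part of this package):

// Illustrative sketch; packStatus is not part of gearman-go.
package sketch

// packStatus lays out handle, numerator and denominator with two NUL
// separators, which is where the hl + nl + dl + 2 sizing comes from.
func packStatus(handle, n, d []byte) []byte {
    hl, nl, dl := len(handle), len(n), len(d)
    data := make([]byte, hl+nl+dl+2)
    copy(data, handle)
    // data[hl] is left as 0x00: first separator.
    copy(data[hl+1:], n)
    // data[hl+1+nl] is left as 0x00: second separator.
    copy(data[hl+nl+2:], d)
    return data
}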