Unverified commit ed17f184 authored by vipwzw, committed by GitHub

Merge pull request #638 from zzh33cn/dpos-v2

UT bugfix
parents 7fc6884b 0bdc5921
@@ -53,16 +53,6 @@ var (
msgQueueSize = 1000
)
// internally generated messages which may update the state
type timeoutInfo struct {
Duration time.Duration `json:"duration"`
State int `json:"state"`
}
func (ti *timeoutInfo) String() string {
return fmt.Sprintf("%v", ti.Duration)
}
// ConsensusState handles execution of the consensus algorithm.
type ConsensusState struct {
// config details
@@ -78,7 +68,7 @@ type ConsensusState struct {
// msgs from ourself, or by timeouts
peerMsgQueue chan MsgInfo
internalMsgQueue chan MsgInfo
timeoutTicker TimeoutTicker
timer *time.Timer
broadcastChannel chan<- MsgInfo
ourID ID
@@ -120,7 +110,6 @@ func NewConsensusState(client *Client, valMgr ValidatorMgr) *ConsensusState {
client: client,
peerMsgQueue: make(chan MsgInfo, msgQueueSize),
internalMsgQueue: make(chan MsgInfo, msgQueueSize),
timeoutTicker: NewTimeoutTicker(),
Quit: make(chan struct{}),
dposState: InitStateObj,
@@ -184,55 +173,35 @@ func (cs *ConsensusState) SetPrivValidator(priv ttypes.PrivValidator, index int)
cs.privValidatorIndex = index
}
// SetTimeoutTicker sets the local timer. It may be useful to overwrite for testing.
//func (cs *ConsensusState) SetTimeoutTicker(timeoutTicker TimeoutTicker) {
// cs.mtx.Lock()
// defer cs.mtx.Unlock()
// cs.timeoutTicker = timeoutTicker
//}
// Start starts the receive routine (and its timer) the first time it is called.
func (cs *ConsensusState) Start() {
if atomic.CompareAndSwapUint32(&cs.started, 0, 1) {
if atomic.LoadUint32(&cs.stopped) == 1 {
dposlog.Error("ConsensusState already stoped")
}
cs.timeoutTicker.Start()
// now start the receiveRoutine
go cs.receiveRoutine()
// schedule the first round!
cs.scheduleDPosTimeout(time.Second*3, InitStateType)
}
}
// Stop stops the timer and the receive routine
func (cs *ConsensusState) Stop() {
cs.timeoutTicker.Stop()
cs.Quit <- struct{}{}
}
// Attempt to schedule a timeout (by sending timeoutInfo on the tickChan)
func (cs *ConsensusState) scheduleDPosTimeout(duration time.Duration, stateType int) {
cs.timeoutTicker.ScheduleTimeout(timeoutInfo{Duration: duration, State: stateType})
}
// send a msg into the receiveRoutine regarding our own proposal, block part, or vote
/*
func (cs *ConsensusState) sendInternalMessage(mi MsgInfo) {
select {
case cs.internalMsgQueue <- mi:
default:
// NOTE: using the go-routine means our votes can
// be processed out of order.
// TODO: use CList here for strict determinism and
// attempt push to internalMsgQueue in receiveRoutine
dposlog.Info("Internal msg queue is full. Using a go-routine")
go func() { cs.internalMsgQueue <- mi }()
// Attempt to reset the timer
func (cs *ConsensusState) resetTimer(duration time.Duration, stateType int) {
dposlog.Info("set timer", "duration", duration, "state", StateTypeMapping[stateType])
if !cs.timer.Stop() {
select {
case <-cs.timer.C:
default:
}
}
cs.timer.Reset(duration)
}
*/
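For readers unfamiliar with the idiom: resetTimer above follows Go's standard drain-then-reset pattern for time.Timer. Stop reports false when the timer has already fired, in which case a stale value may still be buffered in the channel and must be drained before Reset, otherwise the next receive would return immediately. A minimal sketch of the idiom in isolation (safeReset is a hypothetical helper, not part of this commit):

package sketch

import "time"

// safeReset stops, drains, and re-arms t so that the next receive on
// t.C fires only after d has elapsed.
func safeReset(t *time.Timer, d time.Duration) {
	if !t.Stop() {
		// The timer already fired: discard the stale value, if any.
		select {
		case <-t.C:
		default:
		}
	}
	t.Reset(d)
}

Note that this pattern is only safe while a single goroutine owns the timer; after this commit the timer is driven only from receiveRoutine and the state handlers it calls, so that invariant appears to hold.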
// updateToValMgr updates the ConsensusState to match the given validator manager.
func (cs *ConsensusState) updateToValMgr(valMgr ValidatorMgr) {
@@ -254,6 +223,8 @@ func (cs *ConsensusState) receiveRoutine() {
}
}()
cs.timer = time.NewTimer(time.Second * 3)
for {
var mi MsgInfo
@@ -265,12 +236,11 @@
case mi = <-cs.internalMsgQueue:
// handles proposals, block parts, votes
cs.handleMsg(mi)
case ti := <-cs.timeoutTicker.Chan(): // tockChan:
// if the timeout is relevant to the rs
// go to the next step
cs.handleTimeout(ti)
case <-cs.timer.C:
cs.handleTimeout()
case <-cs.Quit:
dposlog.Info("ConsensusState recv quit signal.")
cs.timer.Stop()
return
}
}
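After the change, receiveRoutine is the single event loop: peer messages, internal messages, timer expirations, and the quit signal all arrive through one select, which is what allows the TimeoutTicker goroutine and its tick/tock channels to be deleted. A stripped-down sketch of the same loop shape (the types and handler calls are placeholders for the real ones):

package sketch

import "time"

// msgInfo stands in for the real dpos MsgInfo type.
type msgInfo struct{}

// eventLoop mirrors the structure of receiveRoutine after this commit:
// one goroutine owns the timer and all message handling, so handleMsg
// and handleTimeout can never race with each other.
func eventLoop(peerMsgs, internalMsgs <-chan msgInfo, quit <-chan struct{}) {
	timer := time.NewTimer(3 * time.Second)
	for {
		select {
		case mi := <-peerMsgs:
			_ = mi // handleMsg(mi) in the real code
		case mi := <-internalMsgs:
			_ = mi // handleMsg(mi) in the real code
		case <-timer.C:
			// handleTimeout() in the real code; the state handlers
			// re-arm the timer via resetTimer.
		case <-quit:
			timer.Stop()
			return
		}
	}
}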
@@ -302,9 +272,7 @@ func (cs *ConsensusState) handleMsg(mi MsgInfo) {
}
}
func (cs *ConsensusState) handleTimeout(ti timeoutInfo) {
dposlog.Debug("Received tock", "timeout", ti.Duration, "state", StateTypeMapping[ti.State])
func (cs *ConsensusState) handleTimeout() {
// the timeout will now cause a state transition
cs.mtx.Lock()
defer cs.mtx.Unlock()
@@ -445,14 +413,6 @@ func (cs *ConsensusState) CacheVotes(vote *dpostype.DPosVote) {
if !addrExistFlag {
cs.cachedVotes = append(cs.cachedVotes, vote)
} else if vote.VoteTimestamp > cs.cachedVotes[index].VoteTimestamp {
/*
if index == len(cs.cachedVotes) - 1 {
cs.cachedVotes = append(cs.cachedVotes, vote)
}else {
cs.cachedVotes = append(cs.cachedVotes[:index], cs.dposVotes[(index + 1):]...)
cs.cachedVotes = append(cs.cachedVotes, vote)
}
*/
cs.cachedVotes[index] = vote
}
}
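For context, the surviving branch of CacheVotes keeps at most one cached vote per voter, preferring the newest VoteTimestamp; the deleted commented-out splice was a more convoluted route to the same replacement. A simplified sketch of the rule, using a hypothetical trimmed-down vote type:

package sketch

// cachedVote keeps only the fields relevant to the caching rule; the
// real type is dpostype.DPosVote.
type cachedVote struct {
	VoterAddr     string
	VoteTimestamp int64
}

// cacheVote appends votes from unknown addresses and overwrites a
// known address's entry in place when the incoming vote is newer.
func cacheVote(cached []cachedVote, v cachedVote) []cachedVote {
	for i := range cached {
		if cached[i].VoterAddr == v.VoterAddr {
			if v.VoteTimestamp > cached[i].VoteTimestamp {
				cached[i] = v // in-place replacement; no re-slicing
			}
			return cached
		}
	}
	return append(cached, v)
}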
@@ -200,13 +200,6 @@ func DposPerf() {
fmt.Println("Verify CB failed.")
}
//fmt.Println("=======start GetCBInfoByCircle!=======")
////first time, not hit
//dposClient.csState.GetCBInfoByCircle(task.Cycle)
//time.Sleep(1 * time.Second)
////second time, hit cache
//dposClient.csState.GetCBInfoByCircle(task.Cycle)
fmt.Println("=======start VoteVerify!=======")
vote := generateVote(dposClient.csState)
if nil == vote {
@@ -368,7 +361,6 @@ func DposPerf() {
} else {
fmt.Println("SendTopNRegistTx failed")
}
//sendTopNRegistTx(dposClient.csState, reg)
time.Sleep(2 * time.Second)
fmt.Println("=======start QueryTopNCandidators!=======")
@@ -431,7 +423,6 @@ func createConn2() error {
return err
}
c = types.NewChain33Client(conn)
//r = rand.New(rand.NewSource(types.Now().UnixNano()))
return nil
}
@@ -510,7 +501,6 @@ func NormPut() {
// SendCBTx method
func verifyCB(cs *ConsensusState, info *dty.DposCBInfo) bool {
//info.Pubkey = strings.ToUpper(hex.EncodeToString(cs.privValidator.GetPubKey().Bytes()))
canonical := dty.CanonicalOnceCBInfo{
Cycle: info.Cycle,
StopHeight: info.StopHeight,
@@ -582,49 +572,6 @@ func sendRegistVrfRPTx(cs *ConsensusState, info *dty.DposVrfRPRegist) bool {
return true
}
/*
func sendTopNRegistTx(cs *ConsensusState, reg *dty.TopNCandidatorRegist) bool {
//info.Pubkey = strings.ToUpper(hex.EncodeToString(cs.privValidator.GetPubKey().Bytes()))
obj := dty.CanonicalTopNCandidator(reg.Cand)
reg.Cand.Hash = obj.ID()
reg.Cand.SignerPubkey = cs.privValidator.GetPubKey().Bytes()
byteCB, err := json.Marshal(reg.Cand)
if err != nil {
dposlog.Error("marshal TopNCandidator failed", "err", err)
}
sig, err := cs.privValidator.SignMsg(byteCB)
if err != nil {
dposlog.Error("TopNCandidator failed.", "err", err)
return false
}
reg.Cand.Signature = sig.Bytes()
tx, err := cs.client.CreateTopNRegistTx(reg)
if err != nil {
dposlog.Error("CreateTopNRegistTx failed.", "err", err)
return false
}
tx.Fee = fee
cs.privValidator.SignTx(tx)
dposlog.Info("Sign TopNRegistTx ok.")
reply, err := c.SendTransaction(context.Background(), tx)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return false
}
if !reply.IsOk {
fmt.Fprintln(os.Stderr, errors.New(string(reply.GetMsg())))
return false
}
return true
}
*/
func sendTransferTx(fromKey, to string, amount int64) bool {
signer := util.HexToPrivkey(fromKey)
var tx *types.Transaction
@@ -16,6 +16,7 @@ import (
"github.com/33cn/chain33/queue"
"github.com/33cn/chain33/rpc"
"github.com/33cn/chain33/types"
ttypes "github.com/33cn/plugin/plugin/consensus/dpos/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -454,7 +455,6 @@ func TestNode(t *testing.T) {
fmt.Println("=======start TestNode!=======")
Init()
q1, chain1, s1, mem1, exec1, cs1, p2p1 := initEnvDpos1("chain33.test1.toml")
//q2, chain2, s2, mem2, exec2, cs2, p2p2 := initEnvDpos2("chain33.test2.toml")
defer clearTestData1()
defer chain1.Close()
@@ -465,14 +465,6 @@ func TestNode(t *testing.T) {
defer cs1.Close()
defer p2p1.Close()
//defer chain2.Close()
//defer mem2.Close()
//defer exec2.Close()
//defer s2.Close()
//defer q2.Close()
//defer cs2.Close()
//defer p2p2.Close()
time.Sleep(2 * time.Second)
_, _, err := createConn("127.0.0.1:8802")
@@ -480,11 +472,6 @@ func TestNode(t *testing.T) {
_, _, err = createConn("127.0.0.1:8802")
}
//_, _, err = createConn("127.0.0.1:8804")
//for err != nil {
// _, _, err = createConn("127.0.0.1:8804")
//}
fmt.Println("node1 ip:", cs1.(*Client).GetNode().IP)
fmt.Println("node1 id:", cs1.(*Client).GetNode().ID)
fmt.Println("node1 network:", cs1.(*Client).GetNode().Network)
@@ -513,8 +500,6 @@ func TestNode(t *testing.T) {
fmt.Println("TestNodeCompatibleWith ok")
//time.Sleep(2 * time.Second)
fmt.Println(q1.Name())
fmt.Println(cs1.(*Client).testFlag)
fmt.Println(cs1.(*Client).GetConsensusState() != nil)
@@ -528,23 +513,34 @@ func TestNode(t *testing.T) {
fmt.Println(cs1.(*Client).GenesisDoc().ChainID)
fmt.Println("Validator index: ", cs1.(*Client).ValidatorIndex())
//go cs2.(*Client).GetNode().DialPeerWithAddress("127.0.0.1:36656")
//require.Nil(t, err)
//err = cs1.(*Client).GetNode().DialPeerWithAddress("127.0.0.1:36657")
//require.Nil(t, err)
time.Sleep(1 * time.Second)
//cs1.(*Client).StopC()
if cs1.(*Client).GetNode().IsRunning() {
fmt.Println("=======cs1 is running=======")
//cs1.(*Client).GetConsensusState().Stop()
//cs1.(*Client).GetNode().Stop()
} else {
fmt.Println("======= cs1 is not running=======")
}
fmt.Println("=======test state machine=======")
vote := &ttypes.DPosVote{}
InitStateObj.sendVote(cs1.(*Client).GetConsensusState(), vote)
voteReply := &ttypes.DPosVoteReply{}
InitStateObj.sendVoteReply(cs1.(*Client).GetConsensusState(), voteReply)
InitStateObj.recvVoteReply(cs1.(*Client).GetConsensusState(), voteReply)
notify := &ttypes.DPosNotify{}
InitStateObj.sendNotify(cs1.(*Client).GetConsensusState(), notify)
VotingStateObj.sendVoteReply(cs1.(*Client).GetConsensusState(), voteReply)
VotingStateObj.sendNotify(cs1.(*Client).GetConsensusState(), notify)
VotingStateObj.recvNotify(cs1.(*Client).GetConsensusState(), notify)
VotedStateObj.sendVote(cs1.(*Client).GetConsensusState(), vote)
WaitNotifyStateObj.sendVote(cs1.(*Client).GetConsensusState(), vote)
WaitNotifyStateObj.sendVoteReply(cs1.(*Client).GetConsensusState(), voteReply)
WaitNotifyStateObj.recvVoteReply(cs1.(*Client).GetConsensusState(), voteReply)
WaitNotifyStateObj.sendNotify(cs1.(*Client).GetConsensusState(), notify)
fmt.Println("=======testNode ok=======")
}
@@ -601,43 +597,3 @@ func initEnvDpos1(configName string) (queue.Queue, *blockchain.BlockChain, queue
return q, chain, s, mem, exec, cs, network
}
/*
func initEnvDpos2(configName string) (queue.Queue, *blockchain.BlockChain, queue.Module, queue.Module, *executor.Executor, queue.Module, queue.Module) {
var q = queue.New("channel2")
flag.Parse()
cfg, sub := types.InitCfg(configName)
types.Init(cfg.Title, cfg)
chain := blockchain.New(cfg.BlockChain)
chain.SetQueueClient(q.Client())
exec := executor.New(cfg.Exec, sub.Exec)
exec.SetQueueClient(q.Client())
types.SetMinFee(0)
s := store.New(cfg.Store, sub.Store)
s.SetQueueClient(q.Client())
var subcfg subConfig
if sub != nil {
types.MustDecode(sub.Consensus["dpos"], &subcfg)
}
encode, _ := json.Marshal(subcfg)
fmt.Println(string(encode))
cs := New(cfg.Consensus, sub.Consensus["dpos"])
cs.(*Client).SetTestFlag()
cs.SetQueueClient(q.Client())
mem := mempool.New(cfg.Mempool, nil)
mem.SetQueueClient(q.Client())
network := p2p.New(cfg.P2P)
network.SetQueueClient(q.Client())
rpc.InitCfg(cfg.RPC)
gapi := rpc.NewGRpcServer(q.Client(), nil)
go gapi.Listen()
return q, chain, s, mem, exec, cs, network
}
*/
@@ -388,17 +388,17 @@ func (init *InitState) timeOut(cs *ConsensusState) {
cs.ClearVotes()
// Set the timeout; when it fires, re-check the connection count
cs.scheduleDPosTimeout(time.Duration(timeoutCheckConnections)*time.Millisecond, InitStateType)
cs.resetTimer(time.Duration(timeoutCheckConnections)*time.Millisecond, InitStateType)
} else {
vote := generateVote(cs)
if nil == vote {
cs.scheduleDPosTimeout(time.Duration(timeoutCheckConnections)*time.Millisecond, InitStateType)
cs.resetTimer(time.Duration(timeoutCheckConnections)*time.Millisecond, InitStateType)
return
}
if err := cs.privValidator.SignVote(cs.validatorMgr.ChainID, vote); err != nil {
dposlog.Error("SignVote failed", "vote", vote.String())
cs.scheduleDPosTimeout(time.Duration(timeoutCheckConnections)*time.Millisecond, InitStateType)
cs.resetTimer(time.Duration(timeoutCheckConnections)*time.Millisecond, InitStateType)
return
}
@@ -413,7 +413,7 @@ func (init *InitState) timeOut(cs *ConsensusState) {
dposlog.Info("VotingState send a vote", "vote info", printVote(vote.DPosVote), "localNodeIndex", cs.client.ValidatorIndex(), "now", time.Now().Unix())
cs.dposState.sendVote(cs, vote.DPosVote)
cs.scheduleDPosTimeout(time.Duration(timeoutVoting)*time.Millisecond, VotingStateType)
cs.resetTimer(time.Duration(timeoutVoting)*time.Millisecond, VotingStateType)
// Process the votes cached earlier
for i := 0; i < len(cs.cachedVotes); i++ {
cs.dposState.recvVote(cs, cs.cachedVotes[i])
@@ -480,7 +480,7 @@ func (voting *VotingState) timeOut(cs *ConsensusState) {
}
}
// Check again after a short timeout whether a block was produced and whether re-voting is needed
cs.scheduleDPosTimeout(time.Millisecond*500, VotedStateType)
cs.resetTimer(time.Millisecond*500, VotedStateType)
}
return
}
@@ -494,7 +494,7 @@ func (voting *VotingState) timeOut(cs *ConsensusState) {
dposlog.Info("Change state because of timeOut.", "from", "VotingState", "to", "InitState")
// Connections are usually healthy, so quickly trigger InitState's timeout handling
cs.scheduleDPosTimeout(time.Duration(timeoutCheckConnections)*time.Millisecond, InitStateType)
cs.resetTimer(time.Duration(timeoutCheckConnections)*time.Millisecond, InitStateType)
}
func (voting *VotingState) sendVote(cs *ConsensusState, vote *dpostype.DPosVote) {
@@ -529,7 +529,7 @@ func (voting *VotingState) recvVote(cs *ConsensusState, vote *dpostype.DPosVote)
}
}
// Check again after a short timeout whether a block was produced and whether re-voting is needed
cs.scheduleDPosTimeout(time.Millisecond*500, VotedStateType)
cs.resetTimer(time.Millisecond*500, VotedStateType)
} else if result == continueToVote {
dposlog.Info("VotingState get a vote, but don't get an agreement,waiting for new votes...")
} else {
@@ -538,7 +538,7 @@ func (voting *VotingState) recvVote(cs *ConsensusState, vote *dpostype.DPosVote)
cs.ClearVotes()
cs.SetState(InitStateObj)
dposlog.Info("Change state because of vote failed.", "from", "VotingState", "to", "InitState")
cs.scheduleDPosTimeout(time.Duration(timeoutCheckConnections)*time.Millisecond, InitStateType)
cs.resetTimer(time.Duration(timeoutCheckConnections)*time.Millisecond, InitStateType)
}
}
@@ -580,7 +580,7 @@ func (voted *VotedState) timeOut(cs *ConsensusState) {
// If the block is not yet synced, wait; once synced, proceed with the normal block-production checks.
if block.Height+1 < cs.currentVote.Height {
dposlog.Info("VotedState timeOut but block is not sync,wait...", "localHeight", block.Height, "vote height", cs.currentVote.Height)
cs.scheduleDPosTimeout(time.Second*1, VotedStateType)
cs.resetTimer(time.Second*1, VotedStateType)
return
}
@@ -639,7 +639,7 @@ func (voted *VotedState) timeOut(cs *ConsensusState) {
cs.SetState(InitStateObj)
dposlog.Info("Change state because of time.", "from", "VotedState", "to", "InitState")
cs.scheduleDPosTimeout(time.Duration(timeoutCheckConnections)*time.Millisecond, InitStateType)
cs.resetTimer(time.Duration(timeoutCheckConnections)*time.Millisecond, InitStateType)
return
}
@@ -659,7 +659,7 @@ func (voted *VotedState) timeOut(cs *ConsensusState) {
cs.SetState(InitStateObj)
dposlog.Info("Change state because of time.", "from", "VotedState", "to", "InitState")
cs.scheduleDPosTimeout(time.Duration(timeoutCheckConnections)*time.Millisecond, InitStateType)
cs.resetTimer(time.Duration(timeoutCheckConnections)*time.Millisecond, InitStateType)
return
}
@@ -674,7 +674,7 @@ func (voted *VotedState) timeOut(cs *ConsensusState) {
if block.BlockTime >= task.BlockStop {
// A block has already been produced, or we have fallen behind schedule.
dposlog.Info("VotedState timeOut but block already is generated.", "blocktime", block.BlockTime, "blockStop", task.BlockStop, "now", now)
cs.scheduleDPosTimeout(time.Second*1, VotedStateType)
cs.resetTimer(time.Second*1, VotedStateType)
return
} else if block.BlockTime < task.BlockStart {
@@ -684,12 +684,12 @@ func (voted *VotedState) timeOut(cs *ConsensusState) {
cs.client.SetBlockTime(task.BlockStop)
cs.client.CreateBlock()
cs.scheduleDPosTimeout(time.Millisecond*500, VotedStateType)
cs.resetTimer(time.Millisecond*500, VotedStateType)
return
}
dposlog.Info("Wait time to create block near blockStop.")
cs.scheduleDPosTimeout(time.Millisecond*500, VotedStateType)
cs.resetTimer(time.Millisecond*500, VotedStateType)
return
} else {
@@ -697,7 +697,7 @@ func (voted *VotedState) timeOut(cs *ConsensusState) {
dposlog.Info("Wait to next block cycle.", "waittime", task.BlockStop-now+1)
//cs.scheduleDPosTimeout(time.Second * time.Duration(task.blockStop-now+1), VotedStateType)
cs.scheduleDPosTimeout(time.Millisecond*500, VotedStateType)
cs.resetTimer(time.Millisecond*500, VotedStateType)
return
}
} else {
@@ -717,7 +717,7 @@ func (voted *VotedState) timeOut(cs *ConsensusState) {
cs.ClearVotes()
cs.SetState(WaitNotifyStateObj)
dposlog.Info("Change state because of time.", "from", "VotedState", "to", "WaitNotifyState")
cs.scheduleDPosTimeout(time.Duration(timeoutWaitNotify)*time.Millisecond, WaitNotifyStateType)
cs.resetTimer(time.Duration(timeoutWaitNotify)*time.Millisecond, WaitNotifyStateType)
if cs.cachedNotify != nil {
cs.dposState.recvNotify(cs, cs.cachedNotify)
}
@@ -726,7 +726,7 @@ func (voted *VotedState) timeOut(cs *ConsensusState) {
// Set the timeout
dposlog.Info("wait until change state.", "waittime", cs.currentVote.PeriodStop-now+1)
cs.scheduleDPosTimeout(time.Second*time.Duration(cs.currentVote.PeriodStop-now+1), VotedStateType)
cs.resetTimer(time.Second*time.Duration(cs.currentVote.PeriodStop-now+1), VotedStateType)
return
}
}
@@ -776,7 +776,7 @@ func (voted *VotedState) recvNotify(cs *ConsensusState, notify *dpostype.DPosNot
cs.ClearVotes()
cs.SetState(WaitNotifyStateObj)
dposlog.Info("Change state because of recv notify.", "from", "VotedState", "to", "WaitNotifyState")
cs.scheduleDPosTimeout(time.Duration(timeoutWaitNotify)*time.Millisecond, WaitNotifyStateType)
cs.resetTimer(time.Duration(timeoutWaitNotify)*time.Millisecond, WaitNotifyStateType)
if cs.cachedNotify != nil {
cs.dposState.recvNotify(cs, cs.cachedNotify)
}
@@ -806,7 +806,7 @@ func (wait *WaitNofifyState) timeOut(cs *ConsensusState) {
cs.SetState(InitStateObj)
dposlog.Info("Change state because of time.", "from", "WaitNofifyState", "to", "InitState")
cs.scheduleDPosTimeout(time.Duration(timeoutCheckConnections)*time.Millisecond, InitStateType)
cs.resetTimer(time.Duration(timeoutCheckConnections)*time.Millisecond, InitStateType)
}
func (wait *WaitNofifyState) sendVote(cs *ConsensusState, vote *dpostype.DPosVote) {
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dpos
import (
"time"
)
var (
tickTockBufferSize = 10
)
// TimeoutTicker is a timer that schedules timeouts
// according to the state carried in the timeoutInfo.
// The timeoutInfo.Duration may be non-positive.
type TimeoutTicker interface {
Start()
Stop()
Chan() <-chan timeoutInfo // on which to receive a timeout
ScheduleTimeout(ti timeoutInfo) // reset the timer
}
// timeoutTicker wraps time.Timer;
// a newly scheduled timeout replaces any pending one.
// Timeouts are scheduled along the tickChan,
// and fired on the tockChan.
type timeoutTicker struct {
timer *time.Timer
tickChan chan timeoutInfo // for scheduling timeouts
tockChan chan timeoutInfo // for notifying about them
}
// NewTimeoutTicker returns a new TimeoutTicker.
func NewTimeoutTicker() TimeoutTicker {
tt := &timeoutTicker{
timer: time.NewTimer(0),
tickChan: make(chan timeoutInfo, tickTockBufferSize),
tockChan: make(chan timeoutInfo, tickTockBufferSize),
}
tt.stopTimer() // don't want to fire until the first scheduled timeout
return tt
}
// Start starts the timeout routine.
func (t *timeoutTicker) Start() {
go t.timeoutRoutine()
}
// Stop stops the timeout routine.
func (t *timeoutTicker) Stop() {
t.stopTimer()
}
// Chan returns a channel on which timeouts are sent.
func (t *timeoutTicker) Chan() <-chan timeoutInfo {
return t.tockChan
}
// ScheduleTimeout schedules a new timeout by sending on the internal tickChan.
// The timeoutRoutine is always available to read from tickChan, so this won't block.
// A newly scheduled timeout replaces any pending one.
func (t *timeoutTicker) ScheduleTimeout(ti timeoutInfo) {
t.tickChan <- ti
}
//-------------------------------------------------------------
// stop the timer and drain if necessary
func (t *timeoutTicker) stopTimer() {
// Stop returns false if the timer has already fired or was already stopped
if !t.timer.Stop() {
select {
case <-t.timer.C:
default:
dposlog.Debug("Timer already stopped")
}
}
}
// send on tickChan to start a new timer.
// timers are interrupted and replaced by new ticks from later steps
// timeouts of 0 on the tickChan will be immediately relayed to the tockChan
func (t *timeoutTicker) timeoutRoutine() {
dposlog.Debug("Starting timeout routine")
var ti timeoutInfo
for {
select {
case newti := <-t.tickChan:
dposlog.Debug("Received tick", "old_ti", ti, "new_ti", newti)
// stop the last timer
t.stopTimer()
// update timeoutInfo and reset timer
// NOTE time.Timer allows duration to be non-positive
ti = newti
t.timer.Reset(ti.Duration)
dposlog.Debug("Scheduled timeout", "dur", ti.Duration)
case <-t.timer.C:
dposlog.Info("Timed out", "dur", ti.Duration, "state", StateTypeMapping[ti.State])
// go routine here guarantees timeoutRoutine doesn't block.
// Determinism comes from playback in the receiveRoutine.
// We can eliminate it by merging the timeoutRoutine into receiveRoutine
// and managing the timeouts ourselves with a millisecond ticker
go func(toi timeoutInfo) { t.tockChan <- toi }(ti)
}
}
}
package dpos
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestTicker(t *testing.T) {
ticker := NewTimeoutTicker()
ticker.Start()
ti := timeoutInfo{
Duration: time.Second * time.Duration(3),
State: InitStateType,
}
fmt.Println("timeoutInfo:", ti.String())
now := time.Now().Unix()
ticker.ScheduleTimeout(ti)
<-ticker.Chan()
end := time.Now().Unix()
ticker.Stop()
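// The 3s timeout should take at least ~3 wall-clock seconds to fire;
// asserting a lower bound of 2 whole seconds leaves slack for
// Unix-second truncation and clock adjustments.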
assert.True(t, end-now >= 2)
fmt.Println("TestTicker ok")
}
@@ -69,20 +69,23 @@ func initEnvRaft() (queue.Queue, *blockchain.BlockChain, queue.Module, queue.Mod
flag.Parse()
cfg, sub := types.InitCfg("chain33.test.toml")
types.Init(cfg.Title, cfg)
s := store.New(cfg.Store, sub.Store)
s.SetQueueClient(q.Client())
chain := blockchain.New(cfg.BlockChain)
chain.SetQueueClient(q.Client())
exec := executor.New(cfg.Exec, sub.Exec)
exec.SetQueueClient(q.Client())
types.SetMinFee(0)
s := store.New(cfg.Store, sub.Store)
s.SetQueueClient(q.Client())
mem := mempool.New(cfg.Mempool, nil)
mem.SetQueueClient(q.Client())
cs := NewRaftCluster(cfg.Consensus, sub.Consensus["raft"])
cs.SetQueueClient(q.Client())
mem := mempool.New(cfg.Mempool, nil)
mem.SetQueueClient(q.Client())
network := p2p.New(cfg.P2P)
network.SetQueueClient(q.Client())