Commit 46c31a88 authored by 王志文's avatar 王志文

Merge branch 'init_plugin' into 'develop'

init all plugin See merge request chain33/plugin!2
parents 297c6938 e0a7bda6
package plugin
import (
_ "gitlab.33.cn/chain33/chain33/plugin/consensus/para"
_ "gitlab.33.cn/chain33/chain33/plugin/consensus/pbft"
_ "gitlab.33.cn/chain33/chain33/plugin/consensus/raft"
_ "gitlab.33.cn/chain33/chain33/plugin/consensus/tendermint"
_ "gitlab.33.cn/chain33/chain33/plugin/consensus/ticket"
)
package para
import (
"github.com/stretchr/testify/assert"
//"github.com/stretchr/testify/mock"
"testing"
"gitlab.33.cn/chain33/chain33/types"
"math/rand"
"time"
"gitlab.33.cn/chain33/chain33/common/address"
pt "gitlab.33.cn/chain33/chain33/plugin/dapp/paracross/types"
)
var (
Amount = int64(1 * types.Coin)
Title = string("user.p.para.")
)
func TestFilterTxsForPara(t *testing.T) {
types.Init(Title, nil)
//only main txs
tx0, _ := createMainTx("ticket", "to")
//only main txs group
tx1, _ := createMainTx("ticket", "to")
tx2, _ := createMainTx("token", "to")
tx12 := []*types.Transaction{tx1, tx2}
txGroup12, err := createTxsGroup(tx12)
assert.Nil(t, err)
//para cross tx group succ
tx3, _ := createCrossMainTx("toA")
tx4, err := createCrossParaTx("toB", 4)
assert.Nil(t, err)
tx34 := []*types.Transaction{tx3, tx4}
txGroup34, err := createTxsGroup(tx34)
assert.Nil(t, err)
//all para tx group
tx5, err := createCrossParaTx("toB", 5)
assert.Nil(t, err)
tx6, err := createCrossParaTx("toB", 6)
assert.Nil(t, err)
tx56 := []*types.Transaction{tx5, tx6}
txGroup56, err := createTxsGroup(tx56)
assert.Nil(t, err)
//para cross tx group fail
tx7, _ := createCrossMainTx("toA")
tx8, err := createCrossParaTx("toB", 8)
assert.Nil(t, err)
tx78 := []*types.Transaction{tx7, tx8}
txGroup78, err := createTxsGroup(tx78)
assert.Nil(t, err)
tx9, _ := createMainTx("relay", "to")
//single para tx
txA, err := createCrossParaTx("toB", 10)
assert.Nil(t, err)
//all para tx group
txB, err := createCrossParaTx("toB", 11)
assert.Nil(t, err)
txC, err := createCrossParaTx("toB", 12)
assert.Nil(t, err)
txBC := []*types.Transaction{txB, txC}
txGroupBC, err := createTxsGroup(txBC)
assert.Nil(t, err)
txs := []*types.Transaction{tx0}
txs = append(txs, txGroup12...)
txs = append(txs, txGroup34...)
txs = append(txs, txGroup56...)
txs = append(txs, txGroup78...)
txs = append(txs, tx9, txA)
txs = append(txs, txGroupBC...)
//for i, tx := range txs {
// t.Log("tx exec name", "i", i, "name", string(tx.Execer))
//}
recpt0 := &types.ReceiptData{Ty: types.ExecOk}
recpt1 := &types.ReceiptData{Ty: types.ExecOk}
recpt2 := &types.ReceiptData{Ty: types.ExecOk}
recpt3 := &types.ReceiptData{Ty: types.ExecOk}
recpt4 := &types.ReceiptData{Ty: types.ExecOk}
recpt5 := &types.ReceiptData{Ty: types.ExecPack}
recpt6 := &types.ReceiptData{Ty: types.ExecPack}
log7 := &types.ReceiptLog{Ty: types.TyLogErr}
logs := []*types.ReceiptLog{log7}
recpt7 := &types.ReceiptData{Ty: types.ExecPack, Logs: logs}
recpt8 := &types.ReceiptData{Ty: types.ExecPack}
recpt9 := &types.ReceiptData{Ty: types.ExecOk}
recptA := &types.ReceiptData{Ty: types.ExecPack}
recptB := &types.ReceiptData{Ty: types.ExecPack}
recptC := &types.ReceiptData{Ty: types.ExecPack}
receipts := []*types.ReceiptData{recpt0, recpt1, recpt2, recpt3, recpt4, recpt5,
recpt6, recpt7, recpt8, recpt9, recptA, recptB, recptC}
block := &types.Block{Txs: txs}
detail := &types.BlockDetail{
Block: block,
Receipts: receipts,
}
para := &ParaClient{}
rst := para.FilterTxsForPara(detail)
filterTxs := []*types.Transaction{tx3, tx4, tx5, tx6, txA, txB, txC}
assert.Equal(t, filterTxs, rst)
}
func createMainTx(exec string, to string) (*types.Transaction, error) {
param := types.CreateTx{
To: to,
Amount: Amount,
Fee: 0,
Note: "test",
TokenSymbol: "",
ExecName: exec,
}
transfer := &pt.ParacrossAction{}
v := &pt.ParacrossAction_AssetTransfer{AssetTransfer: &types.AssetsTransfer{
Amount: param.Amount, Note: param.GetNote(), To: param.GetTo()}}
transfer.Value = v
transfer.Ty = pt.ParacrossActionAssetTransfer
tx := &types.Transaction{
Execer: []byte(param.GetExecName()),
Payload: types.Encode(transfer),
To: address.ExecAddress(param.GetExecName()),
Fee: param.Fee,
Nonce: rand.New(rand.NewSource(time.Now().UnixNano())).Int63(),
}
return tx, nil
}
func createCrossMainTx(to string) (*types.Transaction, error) {
param := types.CreateTx{
To: string(to),
Amount: Amount,
Fee: 0,
Note: "test asset transfer",
IsWithdraw: false,
IsToken: false,
TokenSymbol: "",
ExecName: pt.ParaX,
}
transfer := &pt.ParacrossAction{}
v := &pt.ParacrossAction_AssetTransfer{AssetTransfer: &types.AssetsTransfer{
Amount: param.Amount, Note: param.GetNote(), To: param.GetTo()}}
transfer.Value = v
transfer.Ty = pt.ParacrossActionAssetTransfer
tx := &types.Transaction{
Execer: []byte(param.GetExecName()),
Payload: types.Encode(transfer),
To: address.ExecAddress(param.GetExecName()),
Fee: param.Fee,
Nonce: rand.New(rand.NewSource(time.Now().UnixNano())).Int63(),
}
return tx, nil
}
func createCrossParaTx(to string, amount int64) (*types.Transaction, error) {
param := types.CreateTx{
To: string(to),
Amount: amount,
Fee: 0,
Note: "test asset transfer",
IsWithdraw: false,
IsToken: false,
TokenSymbol: "",
ExecName: types.ExecName(pt.ParaX),
}
tx, err := pt.CreateRawAssetTransferTx(&param)
return tx, err
}
func createTxsGroup(txs []*types.Transaction) ([]*types.Transaction, error) {
group, err := types.CreateTxGroup(txs)
if err != nil {
return nil, err
}
err = group.Check(0, types.GInt("MinFee"))
if err != nil {
return nil, err
}
return group.Txs, nil
}
# paracross takes part in multi-node consensus and sends consensus messages to the main chain
## Parachain transactions
1. Filter the main-chain blocks for parachain transactions that match this parachain's title.
1. If cross-chain contracts are involved and transactions of more than two parachains are judged to have failed, the transaction group does not execute successfully. (In that case the main-chain transaction is guaranteed to fail as well.)
1. If no cross-chain contract is involved, the transaction group has no restriction; it may hold, say, 20 transactions across 10 chains. If the main-chain transaction fails, the parachain does not execute it either.
1. If any transaction in the group is ExecOk, the transactions on the main chain are all ok and the whole group can be packed.
1. If all of them are ExecPack, there are two cases: either every transaction in the group is a parachain transaction, or a main-chain transaction failed but was still packed; check LogErr, and if there is an error, pack none of them.
1. The execution-result bitmap the parachain sends to the main chain only covers the parachain txs that were packed; a cross-chain parachain transaction left out of the block because main-chain execution failed is not included in the bitmap. (A rough sketch of these filtering rules follows below.)
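The rules above can be sketched roughly as follows. This is a hedged illustration rather than the actual FilterTxsForPara implementation: `filterParaTxs` and `isParaTx` are hypothetical names, the title-prefix check is a simplification of how cross-chain paracross transactions are matched, and group handling assumes `GroupCount` is set on every member of a transaction group.

```go
package para

import (
	"strings"

	"gitlab.33.cn/chain33/chain33/types"
)

// isParaTx is a simplified assumption: a tx belongs to this parachain when its
// execer carries the parachain title prefix (e.g. "user.p.para.").
func isParaTx(tx *types.Transaction, title string) bool {
	return strings.HasPrefix(string(tx.Execer), title)
}

// filterParaTxs walks txs together with their receipts and applies the packing
// rules listed above; it is an illustration, not the real FilterTxsForPara.
func filterParaTxs(txs []*types.Transaction, receipts []*types.ReceiptData, title string) []*types.Transaction {
	var out []*types.Transaction
	for i := 0; i < len(txs); {
		cnt := int(txs[i].GroupCount)
		if cnt == 0 {
			cnt = 1 // standalone transaction
		}
		keep := false
		for j := i; j < i+cnt; j++ {
			if receipts[j].Ty == types.ExecOk {
				keep = true // something is ExecOk: the main chain is ok, pack the group
			}
		}
		if !keep {
			// everything is ExecPack: pack unless a LogErr records a main-chain failure
			keep = true
			for j := i; j < i+cnt; j++ {
				for _, l := range receipts[j].Logs {
					if l.Ty == types.TyLogErr {
						keep = false
					}
				}
			}
		}
		if keep {
			for j := i; j < i+cnt; j++ {
				if isParaTx(txs[j], title) {
					out = append(out, txs[j])
				}
			}
		}
		i += cnt
	}
	return out
}
```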
## Initial startup
1. The consensus tick (16s) periodically fetches the current consensus height over grpc (sketched below):
* If every node started from genesis, -1 is returned; the node enters the sync phase and starts sending consensus messages.
* If this node was restarted, or is a brand-new node, it first syncs data from the other nodes; during the sync it neither fetches consensus data nor sends consensus messages. Once the sync finishes it fetches the current consensus height, sends no consensus for blocks before that height, starts sending consensus messages from the current consensus height, enters the sync state, and takes part in consensus.
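A minimal sketch of this startup flow, assuming hypothetical `synced`, `consensusHeight` and `sendFrom` helpers; only the 16s tick and the -1 genesis sentinel come from the description above.

```go
package para

import "time"

// startupCommitLoop waits until the node is synced, then decides where to start
// committing from, based on the consensus height reported by the main chain.
func startupCommitLoop(synced func() bool, consensusHeight func() (int64, error), sendFrom func(int64)) {
	ticker := time.NewTicker(16 * time.Second)
	defer ticker.Stop()
	for range ticker.C {
		if !synced() {
			continue // a restarted or brand-new node syncs first and sends nothing
		}
		h, err := consensusHeight() // grpc query against the main chain
		if err != nil {
			continue
		}
		if h == -1 {
			sendFrom(0) // every node started from genesis: commit from height 0
		} else {
			sendFrom(h + 1) // blocks at or below the consensus height are not committed
		}
		return // enter the normal sync state and take part in consensus
	}
}
```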
## Adding a new node or restarting (including empty blocks)
1. On restart, the node checks the current consensus height at startup; if the first height it would send is above the consensus height, it resends everything starting from the height right after the consensus height.
1. A new node syncs main-chain data after startup; it sends no consensus messages for blocks below the consensus height and only starts sending once the height exceeds the consensus height.
## Forks and node rollback
1. If the height being deleted is currently being sent, cancel the in-flight send; otherwise a send that can only fail may be retried forever.
## Normal execution
1. When a main-chain block is received, check whether the current transactions are in the block and executed successfully; a transaction that failed or was merely packed does not count as on-chain and must be resent.
## Signing
1. The private key of the configured address is exported from the wallet and used to sign parachain consensus messages. If the wallet is locked, the wallet side sets an error code to prompt the user; the parachain side keeps querying every 2s until the wallet is unlocked and the query succeeds, at which point the error code is cleared (see the sketch below).
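A minimal sketch of the 2s retry described above; `fetch` is a hypothetical stand-in for the wallet private-key query, which fails with an error code while the wallet is locked.

```go
package para

import "time"

// waitForPrivKey keeps querying the wallet for the configured address's private
// key every 2s until the wallet is unlocked and the query succeeds.
func waitForPrivKey(fetch func() (string, error)) string {
	for {
		key, err := fetch()
		if err == nil {
			return key // wallet unlocked; the error code is cleared on success
		}
		time.Sleep(2 * time.Second) // wallet still locked, try again
	}
}
```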
## Failure scenarios
1. If the grpc link fails, resend after a 1s timeout and keep resending while it fails. If the tx is not seen on chain within two blocks, the resent tx is rebuilt as a new tx; its nonce changes so the mempool does not treat it as a duplicate.
1. The transaction fee is insufficient and the transaction fails.
1. The parachain has already sent a commit msg, the main chain rolls back and can no longer find the block the commit msg refers to; the parachain keeps resending until its own rollback cancels the sending state.
1. The parachain and main chain fork: the main chain fails to execute commits sent by other parachains while its own succeed; this recovers after the main-chain fork rolls back.
1. The main chain is fine but the parachain has never reached consensus since genesis; this needs debugging.
1. The main chain is fine but one parachain node computes incorrectly and cannot reach consensus with the others; the commits it submits fail, yet it still filters transactions and produces blocks, so overall consensus is unaffected. If fewer than 2/3 of the nodes succeed, consensus stalls while every parachain node keeps producing its own blocks; this is a parachain-side problem that needs debugging.
1. All parachain nodes crash at some height while the consensus height lags behind the block height; after the nodes restart there may be a hole in the consensus, which must be avoided. Take the consensus height as the starting point: heights below it need not be sent, while consensus for heights above it but below the height currently being sent must be fetched from the database and sent again.
1. For some reason (for example more than 2/3 of the nodes crash, or data is inconsistent) the system fails to reach consensus at some height. The consensus system still records the commits it receives, but because the consensus height is not contiguous (a consensus hole), later commits are only recorded and do not trigger done; only a commit contiguous with the consensus stored in the database triggers done. So once a hole appears, subsequent transactions must be sent contiguously from where consensus stopped, not just the data for the hole itself (sketched below).
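A hedged sketch of how such a consensus hole could be re-filled after a restart; `loadCommit` and `send` are hypothetical helpers over the local database and the main-chain grpc client.

```go
package para

// resendAfterRestart resends commit data contiguously, starting right after the
// consensus height and stopping before the height currently being sent.
func resendAfterRestart(consensusHeight, sendingHeight int64, loadCommit func(h int64) ([]byte, error), send func([]byte)) {
	// heights at or below the consensus height are final and are skipped;
	// everything between the consensus height and the height currently being
	// sent must go out again contiguously, otherwise later commits never trigger done.
	for h := consensusHeight + 1; h < sendingHeight; h++ {
		commit, err := loadCommit(h)
		if err != nil {
			return // a missing commit cannot be skipped over
		}
		send(commit)
	}
}
```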
## Send-failure strategy
1. The current strategy sends a consensus message as either a single transaction or one transaction group, and it either fully succeeds or fully fails. On failure, i.e. the transaction cannot be found in new blocks, the transactions currently in sending are resent after more than 2 blocks, while new consensus messages keep waiting; if the tx in sending never makes it into a main-chain block, consensus messages for later heights never get sent. Apart from link failures, message failures are essentially caused by forks, and the current strategy looks fine for the failure scenarios seen so far (a rough sketch follows below).
1. An alternative strategy is to send newly arrived transactions together with the current ones; that works best with one transaction per height rather than a transaction group, checking each transaction's on-chain status separately and resending the ones that did not land. This is more complex, and if a higher consensus succeeds while a lower one fails it is of little use, so the first strategy is the one currently used.
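A rough sketch of this all-or-nothing strategy; `onChain` and `resend` are hypothetical helpers, and only the two-block patience threshold and the nonce refresh come from the text above.

```go
package para

import "gitlab.33.cn/chain33/chain33/types"

// watchSending watches new main-chain blocks for the txs currently in sending;
// if they are still missing after more than two blocks, it resends them all.
func watchSending(sending []*types.Transaction, newMainBlocks <-chan int64, onChain func([]*types.Transaction) bool, resend func([]*types.Transaction)) {
	missedBlocks := 0
	for range newMainBlocks {
		if onChain(sending) {
			return // everything landed; the next waiting consensus message may be sent
		}
		missedBlocks++
		if missedBlocks > 2 {
			// rebuild with a fresh nonce so the mempool does not treat it as a duplicate
			resend(sending)
			missedBlocks = 0
		}
	}
}
```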
## Test scenarios
1. The main node and the parachain node start in one docker container; the parachain node starts 120s after the main node, roughly when the main node is at height 8.
1. 6 nodes, 4 parachain nodes; two have an empty-block interval of 4, the other two 3: consensus cannot be reached.
1. 6 nodes, 4 parachain nodes; three have an empty-block interval of 4, one has 3: consensus can be reached.
1. 6 nodes, 4 parachain nodes; 2 start first and cannot reach consensus; one or two more start later and consensus is reached.
1. 6 nodes, 4 parachain nodes; 2 start first and cannot reach consensus; one or two more start later and consensus is reached; then stop the first two, consensus stops; restart one or both of them and consensus is reached again.
1. 6 nodes, 4 parachain nodes; three start first and the fourth starts 10 minutes later; after startup it syncs data from the other nodes and starts sending from the current consensus height.
1. 6 nodes, 4 parachain nodes; all restart at a height that executed successfully but whose consensus was not yet sent; after the restart, check that the unsent consensus is sent again.
1. 6 nodes, 4 parachain nodes split three and three, group a holding three parachain nodes and group b only one; fork test: stop group b first, then stop group a and start group b, then start group a so both mine together. While group b mines alone the parachain cannot reach consensus and stays at its current height; once group a starts, group b's forked nodes roll back, consensus is reached again, and group b's parachain also reaches consensus.
## Hard-fork scenarios
1. The new version adds a mining tx. If no node has consensus yet (consensus height is -1), all parachain node databases can be deleted and the upgraded code rerun without affecting consensus.
1. If the nodes already have consensus at height N, the commit msg must be configured to include the mining transaction only above N; all parachain databases still need to be deleted while the main-chain database is left untouched.
1. If the nodes have consensus at height N and the parachain keeps its existing database while upgrading, the main chain must set a not-yet-reached height as the consensus fork point; the parachain side needs no setting, and nothing before the consensus height is sent.
\ No newline at end of file
package para
import (
"math/rand"
"testing"
"time"
"github.com/pkg/errors"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
"gitlab.33.cn/chain33/chain33/blockchain"
"gitlab.33.cn/chain33/chain33/common/log"
"gitlab.33.cn/chain33/chain33/executor"
"gitlab.33.cn/chain33/chain33/mempool"
"gitlab.33.cn/chain33/chain33/p2p"
//_ "gitlab.33.cn/chain33/chain33/plugin/dapp/paracross"
pp "gitlab.33.cn/chain33/chain33/plugin/dapp/paracross/executor"
//"gitlab.33.cn/chain33/chain33/plugin/dapp/paracross/rpc"
pt "gitlab.33.cn/chain33/chain33/plugin/dapp/paracross/types"
"gitlab.33.cn/chain33/chain33/queue"
"gitlab.33.cn/chain33/chain33/store"
_ "gitlab.33.cn/chain33/chain33/system"
"gitlab.33.cn/chain33/chain33/types"
typesmocks "gitlab.33.cn/chain33/chain33/types/mocks"
)
var random *rand.Rand
func init() {
types.Init("user.p.para.", nil)
pp.Init("paracross", nil)
random = rand.New(rand.NewSource(types.Now().UnixNano()))
consensusInterval = 2
log.SetLogLevel("debug")
}
type suiteParaCommitMsg struct {
// Include our basic suite logic.
suite.Suite
para *ParaClient
grpcCli *typesmocks.Chain33Client
q queue.Queue
block *blockchain.BlockChain
exec *executor.Executor
store queue.Module
mem *mempool.Mempool
network *p2p.P2p
}
func initConfigFile() (*types.Config, *types.ConfigSubModule) {
cfg, sub := types.InitCfg("../../../plugin/dapp/paracross/cmd/build/chain33.para.test.toml")
return cfg, sub
}
func (s *suiteParaCommitMsg) initEnv(cfg *types.Config, sub *types.ConfigSubModule) {
q := queue.New("channel")
s.q = q
//api, _ = client.New(q.Client(), nil)
s.block = blockchain.New(cfg.BlockChain)
s.block.SetQueueClient(q.Client())
s.exec = executor.New(cfg.Exec, sub.Exec)
s.exec.SetQueueClient(q.Client())
s.store = store.New(cfg.Store, sub.Store)
s.store.SetQueueClient(q.Client())
s.para = New(cfg.Consensus, sub.Consensus["para"]).(*ParaClient)
s.grpcCli = &typesmocks.Chain33Client{}
//data := &types.Int64{1}
s.grpcCli.On("GetLastBlockSequence", mock.Anything, mock.Anything).Return(nil, errors.New("nil"))
reply := &types.Reply{IsOk: true}
s.grpcCli.On("IsSync", mock.Anything, mock.Anything).Return(reply, nil)
result := &pt.ParacrossStatus{Height: -1}
data := types.Encode(result)
ret := &types.Reply{IsOk: true, Msg: data}
s.grpcCli.On("QueryChain", mock.Anything, mock.Anything).Return(ret, nil).Maybe()
s.grpcCli.On("SendTransaction", mock.Anything, mock.Anything).Return(reply, nil).Maybe()
s.para.grpcClient = s.grpcCli
s.para.SetQueueClient(q.Client())
s.mem = mempool.New(cfg.MemPool)
s.mem.SetQueueClient(q.Client())
s.mem.SetSync(true)
s.mem.WaitPollLastHeader()
s.network = p2p.New(cfg.P2P)
s.network.SetQueueClient(q.Client())
s.para.wg.Add(1)
go walletProcess(q, s.para)
}
func walletProcess(q queue.Queue, para *ParaClient) {
defer para.wg.Done()
client := q.Client()
client.Sub("wallet")
for {
select {
case <-para.commitMsgClient.quit:
return
case msg := <-client.Recv():
if msg.Ty == types.EventDumpPrivkey {
msg.Reply(client.NewMessage("", types.EventHeader, &types.ReplyString{"6da92a632ab7deb67d38c0f6560bcfed28167998f6496db64c258d5e8393a81b"}))
}
}
}
}
func (s *suiteParaCommitMsg) SetupSuite() {
s.initEnv(initConfigFile())
}
func (s *suiteParaCommitMsg) TestRun_1() {
//s.testGetBlock()
lastBlock, err := s.para.RequestLastBlock()
if err != nil {
plog.Error("para test", "err", err.Error())
}
plog.Info("para test---------", "last height", lastBlock.Height)
s.para.createBlock(lastBlock, nil, 0, getMainBlock(1, lastBlock.BlockTime+1))
lastBlock, err = s.para.RequestLastBlock()
if err != nil {
plog.Error("para test--2", "err", err.Error())
}
plog.Info("para test---------", "last height", lastBlock.Height)
s.para.createBlock(lastBlock, nil, 1, getMainBlock(2, lastBlock.BlockTime+1))
time.Sleep(time.Second * 3)
lastBlock, err = s.para.RequestLastBlock()
s.para.DelBlock(lastBlock, 2)
time.Sleep(time.Second * 3)
}
func TestRunSuiteParaCommitMsg(t *testing.T) {
log := new(suiteParaCommitMsg)
suite.Run(t, log)
}
func (s *suiteParaCommitMsg) TearDownSuite() {
time.Sleep(time.Second * 5)
s.block.Close()
s.para.Close()
s.exec.Close()
s.store.Close()
s.mem.Close()
s.network.Close()
s.q.Close()
}
func getMainBlock(height int64, BlockTime int64) *types.Block {
return &types.Block{
Height: height,
BlockTime: BlockTime,
}
}
t=2018-09-05T17:07:56+0800 lvl=info msg="init::LoadBlockStoreHeight::database may be crash" module=blockchain err=ErrHeightNotExist
t=2018-09-05T17:07:56+0800 lvl=info msg="load block height error, may be init database" module=blockchain height=-1
t=2018-09-05T17:07:56+0800 lvl=info msg=InitIndexAndBestView module=blockchain cost=485.295µs
t=2018-09-05T17:07:56+0800 lvl=info msg=GetDbVersion module=blockchain submodule=store err=ErrNotFoundInDb
t=2018-09-05T17:07:56+0800 lvl=info msg=SetDbVersion module=blockchain submodule=store blcokchain db version=1
t=2018-09-05T17:07:56+0800 lvl=info msg="mempool piple line start" module=mempool
t=2018-09-05T17:07:56+0800 lvl=info msg="pollLastHeader quit" module=mempool
t=2018-09-05T17:08:29+0800 lvl=info msg="init::LoadBlockStoreHeight::database may be crash" module=blockchain err=ErrHeightNotExist
t=2018-09-05T17:08:29+0800 lvl=info msg="load block height error, may be init database" module=blockchain height=-1
t=2018-09-05T17:08:29+0800 lvl=info msg=InitIndexAndBestView module=blockchain cost=1.503171ms
t=2018-09-05T17:08:29+0800 lvl=info msg=GetDbVersion module=blockchain submodule=store blockchain db version=1
t=2018-09-05T17:08:29+0800 lvl=info msg="mempool piple line start" module=mempool
t=2018-09-05T17:08:29+0800 lvl=info msg="pollLastHeader quit" module=mempool
t=2018-09-05T17:08:29+0800 lvl=info msg="Enter store mavl" module=store
t=2018-09-05T17:08:29+0800 lvl=info msg="start to creat pbft node" module=Pbft
t=2018-09-05T17:08:29+0800 lvl=info msg="Enter consensus pbft"
t=2018-09-05T17:08:29+0800 lvl=info msg="Enter SetQueue method of pbft consensus" module=Pbft
t=2018-09-05T17:08:29+0800 lvl=info msg="Enter SetQueueClient method of consensus"
t=2018-09-05T17:08:29+0800 lvl=info msg=ExecBlock module=util height=0 ntx=1 writebatchsync=false cost=411.009µs
t=2018-09-05T17:08:29+0800 lvl=info msg=EventAddBlockDetail module=blockchain height=0 hash=935cb32d5a24e56967c7a0a9bf357e447fcaa082d703159ccb3a04bf9ae8ec32
t=2018-09-05T17:08:29+0800 lvl=info msg=p2p module=p2p Version=118
t=2018-09-05T17:08:29+0800 lvl=info msg=p2p module=p2p InnerBounds=500
t=2018-09-05T17:08:29+0800 lvl=info msg=p2p module=p2p setqueuecliet=ok
t=2018-09-05T17:08:29+0800 lvl=info msg=DetectNodeAddr module=p2p addr:=192.168.0.141
t=2018-09-05T17:08:29+0800 lvl=info msg=detectNodeAddr module=p2p LocalAddr=192.168.0.141
t=2018-09-05T17:08:29+0800 lvl=info msg=ShowTaskCapcity module=p2p Capcity=1000
t=2018-09-05T17:08:29+0800 lvl=eror msg=GetFeeAmount module=wallet submodule=store Get from db error=ErrNotFoundInDb
t=2018-09-05T17:08:29+0800 lvl=eror msg=getVersion module=wallet.privacy db.Get error=ErrNotFoundInDb
t=2018-09-05T17:08:29+0800 lvl=eror msg="GetAccountByPrefix addr not exist" wallet=store
t=2018-09-05T17:08:29+0800 lvl=info msg=GetWalletAccounts module=wallet GetAccountByPrefix:err=ErrAccountNotExist
t=2018-09-05T17:08:29+0800 lvl=eror msg=rescanAllTxToUpdateUTXOs module=wallet.privacy walletOperate.GetWalletAccounts error=ErrAccountNotExist
t=2018-09-05T17:08:29+0800 lvl=eror msg=GetAutoMinerFlag module=wallet.ticket Get error=ErrNotFoundInDb
t=2018-09-05T17:08:29+0800 lvl=info msg="Begin auto mining" module=wallet.ticket
package pbft
import (
"time"
"gitlab.33.cn/chain33/chain33/common/merkle"
"gitlab.33.cn/chain33/chain33/queue"
drivers "gitlab.33.cn/chain33/chain33/system/consensus"
cty "gitlab.33.cn/chain33/chain33/system/dapp/coins/types"
"gitlab.33.cn/chain33/chain33/types"
)
func init() {
drivers.Reg("pbft", NewPbft)
drivers.QueryData.Register("pbft", &PbftClient{})
}
type PbftClient struct {
*drivers.BaseClient
replyChan chan *types.ClientReply
requestChan chan *types.Request
isPrimary bool
}
func NewBlockstore(cfg *types.Consensus, replyChan chan *types.ClientReply, requestChan chan *types.Request, isPrimary bool) *PbftClient {
c := drivers.NewBaseClient(cfg)
client := &PbftClient{BaseClient: c, replyChan: replyChan, requestChan: requestChan, isPrimary: isPrimary}
c.SetChild(client)
return client
}
func (client *PbftClient) ProcEvent(msg queue.Message) bool {
return false
}
func (client *PbftClient) Propose(block *types.Block) {
op := &types.Operation{block}
req := ToRequestClient(op, types.Now().String(), clientAddr)
client.requestChan <- req
}
func (client *PbftClient) CheckBlock(parent *types.Block, current *types.BlockDetail) error {
return nil
}
func (client *PbftClient) SetQueueClient(c queue.Client) {
plog.Info("Enter SetQueue method of pbft consensus")
client.InitClient(c, func() {
client.InitBlock()
})
go client.EventLoop()
//go client.readReply()
go client.CreateBlock()
}
func (client *PbftClient) CreateBlock() {
issleep := true
if !client.isPrimary {
return
}
for {
if issleep {
time.Sleep(10 * time.Second)
}
plog.Info("=============start get tx===============")
lastBlock := client.GetCurrentBlock()
txs := client.RequestTx(int(types.GetP(lastBlock.Height+1).MaxTxNumber), nil)
if len(txs) == 0 {
issleep = true
continue
}
issleep = false
plog.Info("==================start create new block!=====================")
//check dup
//txs = client.CheckTxDup(txs)
//fmt.Println(len(txs))
var newblock types.Block
newblock.ParentHash = lastBlock.Hash()
newblock.Height = lastBlock.Height + 1
newblock.Txs = txs
newblock.TxHash = merkle.CalcMerkleRoot(newblock.Txs)
newblock.BlockTime = types.Now().Unix()
if lastBlock.BlockTime >= newblock.BlockTime {
newblock.BlockTime = lastBlock.BlockTime + 1
}
client.Propose(&newblock)
//time.Sleep(time.Second)
client.readReply()
plog.Info("===============readreply and writeblock done===============")
}
}
func (client *PbftClient) GetGenesisBlockTime() int64 {
return genesisBlockTime
}
func (client *PbftClient) CreateGenesisTx() (ret []*types.Transaction) {
var tx types.Transaction
tx.Execer = []byte("coins")
tx.To = genesis
//gen payload
g := &cty.CoinsAction_Genesis{}
g.Genesis = &types.AssetsGenesis{}
g.Genesis.Amount = 1e8 * types.Coin
tx.Payload = types.Encode(&cty.CoinsAction{Value: g, Ty: cty.CoinsActionGenesis})
ret = append(ret, &tx)
return
}
func (client *PbftClient) readReply() {
data := <-client.replyChan
if data == nil {
plog.Error("block is nil")
return
}
plog.Info("===============Get block from reply channel===========")
//client.SetCurrentBlock(data.Result.Value)
lastBlock := client.GetCurrentBlock()
err := client.WriteBlock(lastBlock.StateHash, data.Result.Value)
if err != nil {
plog.Error("********************err:", err)
return
}
client.SetCurrentBlock(data.Result.Value)
}
Title="local"
[log]
# log level; debug(dbug)/info/warn/error(eror)/crit are supported
loglevel = "info"
logConsoleLevel = "info"
# log file name, may include a directory; all generated log files are put under that directory
logFile = "logs/chain33.log"
# maximum size of a single log file (unit: MB)
maxFileSize = 100
# maximum number of rotated log files to keep
maxBackups = 50
# maximum age of rotated log files to keep (unit: days)
maxAge = 28
# whether log file names use local time (otherwise UTC)
localTime = true
# whether rotated log files are compressed (gz format)
compress = true
# whether to print the calling source file and line number
callerFile = false
# whether to print the calling function
callerFunction = false
[blockchain]
defCacheSize=512
maxFetchBlockNum=128
timeoutSeconds=5
batchBlockNum=128
driver="leveldb"
dbPath="datadir"
isStrongConsistency=true
singleMode=true
batchsync=false
enableTxQuickIndex=true
[p2p]
seeds=["127.0.0.1:13802"]
enable=true
isSeed=true
serverStart=true
innerSeedEnable=false
useGithub=false
innerBounds=300
msgCacheSize=10240
driver="leveldb"
dbPath="datadir/addrbook"
dbCache=4
grpcLogFile="grpc33.log"
version=118
verMix=118
verMax=119
[mempool]
poolCacheSize=10240
minTxFee=100000
[rpc]
jrpcBindAddr="localhost:8801"
grpcBindAddr="localhost:8802"
whitelist=["127.0.0.1"]
[consensus]
name="pbft"
minerstart=false
[mver.consensus]
fundKeyAddr = "1BQXS6TxaYYG5mADaWij4AxhZZUTpw95a5"
coinReward = 18
coinDevFund = 12
ticketPrice = 10000
powLimitBits = "0x1f00ffff"
retargetAdjustmentFactor = 4
futureBlockTime = 16
ticketFrozenTime = 5 #5s only for test
ticketWithdrawTime = 10 #10s only for test
ticketMinerWaitTime = 2 #2s only for test
maxTxNumber = 1600 #160
targetTimespan = 2304
targetTimePerBlock = 16
[mver.consensus.ForkChainParamV1]
maxTxNumber = 10000
targetTimespan = 288 #only for test
targetTimePerBlock = 2
[mver.consensus.ForkChainParamV2]
powLimitBits = "0x1f2fffff"
[consensus.sub.pbft]
genesis="14KEKbYtKKQm4wMthSK9J4La4nAiidGozt"
genesisBlockTime=1514533394
nodeId=1
peersURL="127.0.0.1:8890"
clientAddr="127.0.0.1:8890"
[store]
name="mavl"
driver="leveldb"
dbPath="datadir/mavltree"
dbCache=128
[store.sub.mavl]
enableMavlPrefix=false
enableMVCC=false
[wallet]
minFee=100000
driver="leveldb"
dbPath="wallet"
dbCache=16
signType="secp256k1"
[wallet.sub.ticket]
minerdisable=false
minerwhitelist=["*"]
[exec]
isFree=false
minExecFee=100000
enableStat=false
enableMVCC=false
alias=["token1:token","token2:token","token3:token"]
saveTokenTxList=false
[exec.sub.cert]
# whether certificate verification and signing are enabled
enable=false
# path to the crypto files
cryptoPath="authdir/crypto"
# certificate signature type; "auth_ecdsa" and "auth_sm2" are supported
signType="auth_ecdsa"
package pbft
import (
"strings"
log "github.com/inconshreveable/log15"
"gitlab.33.cn/chain33/chain33/queue"
pb "gitlab.33.cn/chain33/chain33/types"
)
var (
plog = log.New("module", "Pbft")
genesis string
genesisBlockTime int64
clientAddr string
)
type subConfig struct {
Genesis string `json:"genesis"`
GenesisBlockTime int64 `json:"genesisBlockTime"`
NodeId int64 `json:"nodeId"`
PeersURL string `json:"peersURL"`
ClientAddr string `json:"clientAddr"`
}
func NewPbft(cfg *pb.Consensus, sub []byte) queue.Module {
plog.Info("start to creat pbft node")
var subcfg subConfig
if sub != nil {
pb.MustDecode(sub, &subcfg)
}
if subcfg.Genesis != "" {
genesis = subcfg.Genesis
}
if subcfg.GenesisBlockTime > 0 {
genesisBlockTime = subcfg.GenesisBlockTime
}
if int(subcfg.NodeId) == 0 || strings.Compare(subcfg.PeersURL, "") == 0 || strings.Compare(subcfg.ClientAddr, "") == 0 {
plog.Error("The nodeId, peersURL or clientAddr is empty!")
return nil
}
clientAddr = subcfg.ClientAddr
var c *PbftClient
replyChan, requestChan, isPrimary := NewReplica(uint32(subcfg.NodeId), subcfg.PeersURL, subcfg.ClientAddr)
c = NewBlockstore(cfg, replyChan, requestChan, isPrimary)
return c
}
=============== Sep 5, 2018 (CST) ===============
17:08:29.922706 log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed
17:08:29.923258 db@open opening
17:08:29.923864 version@stat F·[] S·0B[] Sc·[]
17:08:29.924069 db@janitor F·2 G·0
17:08:29.924089 db@open done T·818.266µs
=============== Sep 5, 2018 (CST) ===============
17:07:56.823741 log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed
17:07:56.824523 db@open opening
17:07:56.825400 version@stat F·[] S·0B[] Sc·[]
17:07:56.825624 db@janitor F·2 G·0
17:07:56.825646 db@open done T·1.107044ms
=============== Sep 5, 2018 (CST) ===============
17:08:29.905714 log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed
17:08:29.906016 version@stat F·[] S·0B[] Sc·[]
17:08:29.906037 db@open opening
17:08:29.906108 journal@recovery F·1
17:08:29.907790 journal@recovery recovering @1
17:08:29.908286 memdb@flush created L0@2 N·1 S·194B "Blo..Key,v1":"Blo..Key,v1"
17:08:29.908970 version@stat F·[1] S·194B[194B] Sc·[0.25]
17:08:29.909559 db@janitor F·3 G·0
17:08:29.909588 db@open done T·3.53978ms
=============== Sep 5, 2018 (CST) ===============
17:08:29.914061 log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed
17:08:29.914622 db@open opening
17:08:29.915266 version@stat F·[] S·0B[] Sc·[]
17:08:29.915477 db@janitor F·2 G·0
17:08:29.915495 db@open done T·860.67µs
=============== Sep 5, 2018 (CST) ===============
17:08:29.948081 log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed
17:08:29.948567 db@open opening
17:08:29.949041 version@stat F·[] S·0B[] Sc·[]
17:08:29.949212 db@janitor F·2 G·0
17:08:29.949228 db@open done T·651.21µs
package pbft
import (
"bytes"
"crypto/md5"
"fmt"
"io"
"net"
"github.com/golang/protobuf/proto"
"gitlab.33.cn/chain33/chain33/types"
)
// Digest
func EQ(d1 []byte, d2 []byte) bool {
if len(d1) != len(d2) {
return false
}
for idx, b := range d1 {
if b != d2[idx] {
return false
}
}
return true
}
// Checkpoint
func ToCheckpoint(sequence uint32, digest []byte) *types.Checkpoint {
return &types.Checkpoint{sequence, digest}
}
// Entry
func ToEntry(sequence uint32, digest []byte, view uint32) *types.Entry {
return &types.Entry{sequence, digest, view}
}
// ViewChange
func ToViewChange(viewchanger uint32, digest []byte) *types.ViewChange {
return &types.ViewChange{viewchanger, digest}
}
// Summary
func ToSummary(sequence uint32, digest []byte) *types.Summary {
return &types.Summary{sequence, digest}
}
// Request
func ToRequestClient(op *types.Operation, timestamp, client string) *types.Request {
return &types.Request{
Value: &types.Request_Client{
&types.RequestClient{op, timestamp, client}},
}
}
func ToRequestPreprepare(view, sequence uint32, digest []byte, replica uint32) *types.Request {
return &types.Request{
Value: &types.Request_Preprepare{
&types.RequestPrePrepare{view, sequence, digest, replica}},
}
}
func ToRequestPrepare(view, sequence uint32, digest []byte, replica uint32) *types.Request {
return &types.Request{
Value: &types.Request_Prepare{
&types.RequestPrepare{view, sequence, digest, replica}},
}
}
func ToRequestCommit(view, sequence, replica uint32) *types.Request {
return &types.Request{
Value: &types.Request_Commit{
&types.RequestCommit{view, sequence, replica}},
}
}
func ToRequestCheckpoint(sequence uint32, digest []byte, replica uint32) *types.Request {
return &types.Request{
Value: &types.Request_Checkpoint{
&types.RequestCheckpoint{sequence, digest, replica}},
}
}
func ToRequestViewChange(view, sequence uint32, checkpoints []*types.Checkpoint, preps, prePreps []*types.Entry, replica uint32) *types.Request {
return &types.Request{
Value: &types.Request_Viewchange{
&types.RequestViewChange{view, sequence, checkpoints, preps, prePreps, replica}},
}
}
func ToRequestAck(view, replica, viewchanger uint32, digest []byte) *types.Request {
return &types.Request{
Value: &types.Request_Ack{
&types.RequestAck{view, replica, viewchanger, digest}},
}
}
func ToRequestNewView(view uint32, viewChanges []*types.ViewChange, summaries []*types.Summary, replica uint32) *types.Request {
return &types.Request{
Value: &types.Request_Newview{
&types.RequestNewView{view, viewChanges, summaries, replica}},
}
}
// Request Methods
func ReqDigest(req *types.Request) []byte {
if req == nil {
return nil
}
bytes := md5.Sum([]byte(req.String()))
return bytes[:]
}
/*func (req *Request) LowWaterMark() uint32 {
// only for requestViewChange
reqViewChange := req.GetViewchange()
checkpoints := reqViewChange.GetCheckpoints()
lastStable := checkpoints[len(checkpoints)-1]
lwm := lastStable.Sequence
return lwm
}*/
// Reply
func ToReply(view uint32, timestamp, client string, replica uint32, result *types.Result) *types.ClientReply {
return &types.ClientReply{view, timestamp, client, replica, result}
}
// Reply Methods
func RepDigest(reply fmt.Stringer) []byte {
if reply == nil {
return nil
}
bytes := md5.Sum([]byte(reply.String()))
return bytes[:]
}
// Write proto message
func WriteMessage(addr string, msg proto.Message) error {
conn, err := net.Dial("tcp", addr)
if err != nil {
return err
}
// defer after the error check so a failed Dial does not close a nil connection
defer conn.Close()
bz, err := proto.Marshal(msg)
if err != nil {
return err
}
n, err := conn.Write(bz)
plog.Debug("size of byte is", "", n)
return err
}
// Read proto message
func ReadMessage(conn io.Reader, msg proto.Message) error {
var buf bytes.Buffer
n, err := io.Copy(&buf, conn)
plog.Debug("size of byte is", "", n)
if err != nil {
return err
}
err = proto.Unmarshal(buf.Bytes(), msg)
return err
}
package pbft
import (
"flag"
"fmt"
"math/rand"
"os"
"strconv"
"testing"
"time"
"gitlab.33.cn/chain33/chain33/blockchain"
"gitlab.33.cn/chain33/chain33/common"
"gitlab.33.cn/chain33/chain33/common/crypto"
"gitlab.33.cn/chain33/chain33/common/limits"
"gitlab.33.cn/chain33/chain33/common/log"
"gitlab.33.cn/chain33/chain33/executor"
"gitlab.33.cn/chain33/chain33/mempool"
"gitlab.33.cn/chain33/chain33/p2p"
"gitlab.33.cn/chain33/chain33/queue"
"gitlab.33.cn/chain33/chain33/store"
cty "gitlab.33.cn/chain33/chain33/system/dapp/coins/types"
"gitlab.33.cn/chain33/chain33/types"
"gitlab.33.cn/chain33/chain33/wallet"
_ "gitlab.33.cn/chain33/chain33/plugin/dapp/init"
_ "gitlab.33.cn/chain33/chain33/plugin/store/init"
_ "gitlab.33.cn/chain33/chain33/system"
)
var (
random *rand.Rand
transactions []*types.Transaction
txSize = 1000
)
func init() {
err := limits.SetLimits()
if err != nil {
panic(err)
}
random = rand.New(rand.NewSource(types.Now().UnixNano()))
log.SetLogLevel("info")
}
func TestPbft(t *testing.T) {
q, chain, p2pnet, s, mem, exec, cs, wallet := initEnvPbft()
defer chain.Close()
defer mem.Close()
defer p2pnet.Close()
defer exec.Close()
defer s.Close()
defer cs.Close()
defer q.Close()
defer wallet.Close()
time.Sleep(5 * time.Second)
sendReplyList(q)
clearTestData()
}
func initEnvPbft() (queue.Queue, *blockchain.BlockChain, *p2p.P2p, queue.Module, *mempool.Mempool, queue.Module, queue.Module, queue.Module) {
var q = queue.New("channel")
flag.Parse()
cfg, sub := types.InitCfg("chain33.test.toml")
types.Init(cfg.Title, cfg)
chain := blockchain.New(cfg.BlockChain)
chain.SetQueueClient(q.Client())
mem := mempool.New(cfg.MemPool)
mem.SetQueueClient(q.Client())
exec := executor.New(cfg.Exec, sub.Exec)
exec.SetQueueClient(q.Client())
types.SetMinFee(0)
s := store.New(cfg.Store, sub.Store)
s.SetQueueClient(q.Client())
cs := NewPbft(cfg.Consensus, sub.Consensus["pbft"])
cs.SetQueueClient(q.Client())
p2pnet := p2p.New(cfg.P2P)
p2pnet.SetQueueClient(q.Client())
walletm := wallet.New(cfg.Wallet, sub.Wallet)
walletm.SetQueueClient(q.Client())
return q, chain, p2pnet, s, mem, exec, cs, walletm
}
func sendReplyList(q queue.Queue) {
client := q.Client()
client.Sub("mempool")
var count int
for msg := range client.Recv() {
if msg.Ty == types.EventTxList {
count++
createReplyList("test" + strconv.Itoa(count))
msg.Reply(client.NewMessage("consensus", types.EventReplyTxList,
&types.ReplyTxList{transactions}))
if count == 5 {
time.Sleep(5 * time.Second)
break
}
}
}
}
func getprivkey(key string) crypto.PrivKey {
cr, err := crypto.New(types.GetSignName("", types.SECP256K1))
if err != nil {
panic(err)
}
bkey, err := common.FromHex(key)
if err != nil {
panic(err)
}
priv, err := cr.PrivKeyFromBytes(bkey)
if err != nil {
panic(err)
}
return priv
}
func createReplyList(account string) {
var result []*types.Transaction
for j := 0; j < txSize; j++ {
//tx := &types.Transaction{}
val := &cty.CoinsAction_Transfer{&types.AssetsTransfer{Amount: 10}}
action := &cty.CoinsAction{Value: val, Ty: cty.CoinsActionTransfer}
tx := &types.Transaction{Execer: []byte("coins"), Payload: types.Encode(action), Fee: 0}
tx.To = "14qViLJfdGaP4EeHnDyJbEGQysnCpwn1gZ"
tx.Nonce = random.Int63()
tx.Sign(types.SECP256K1, getprivkey("CC38546E9E659D15E6B4893F0AB32A06D103931A8230B0BDE71459D2B27D6944"))
result = append(result, tx)
}
//result = append(result, tx)
transactions = result
}
func clearTestData() {
err := os.RemoveAll("datadir")
if err != nil {
fmt.Println("delete datadir have a err:", err.Error())
}
err = os.RemoveAll("wallet")
if err != nil {
fmt.Println("delete wallet have a err:", err.Error())
}
fmt.Println("test data clear sucessfully!")
}
package raft
import (
"fmt"
"sync"
"time"
"github.com/coreos/etcd/snap"
"github.com/golang/protobuf/proto"
"gitlab.33.cn/chain33/chain33/common/merkle"
"gitlab.33.cn/chain33/chain33/queue"
drivers "gitlab.33.cn/chain33/chain33/system/consensus"
cty "gitlab.33.cn/chain33/chain33/system/dapp/coins/types"
"gitlab.33.cn/chain33/chain33/types"
)
var (
zeroHash [32]byte
)
func init() {
drivers.Reg("raft", NewRaftCluster)
drivers.QueryData.Register("raft", &RaftClient{})
}
type RaftClient struct {
*drivers.BaseClient
proposeC chan<- *types.Block
commitC <-chan *types.Block
errorC <-chan error
snapshotter *snap.Snapshotter
validatorC <-chan bool
stopC chan<- struct{}
once sync.Once
}
func NewBlockstore(cfg *types.Consensus, snapshotter *snap.Snapshotter, proposeC chan<- *types.Block, commitC <-chan *types.Block, errorC <-chan error, validatorC <-chan bool, stopC chan<- struct{}) *RaftClient {
c := drivers.NewBaseClient(cfg)
client := &RaftClient{BaseClient: c, proposeC: proposeC, snapshotter: snapshotter, validatorC: validatorC, commitC: commitC, errorC: errorC, stopC: stopC}
c.SetChild(client)
return client
}
func (client *RaftClient) GetGenesisBlockTime() int64 {
return genesisBlockTime
}
func (client *RaftClient) CreateGenesisTx() (ret []*types.Transaction) {
var tx types.Transaction
tx.Execer = []byte(cty.CoinsX)
tx.To = genesis
//gen payload
g := &cty.CoinsAction_Genesis{}
g.Genesis = &types.AssetsGenesis{}
g.Genesis.Amount = 1e8 * types.Coin
tx.Payload = types.Encode(&cty.CoinsAction{Value: g, Ty: cty.CoinsActionGenesis})
ret = append(ret, &tx)
return
}
func (client *RaftClient) ProcEvent(msg queue.Message) bool {
return false
}
func (client *RaftClient) CheckBlock(parent *types.Block, current *types.BlockDetail) error {
return nil
}
func (client *RaftClient) getSnapshot() ([]byte, error) {
// this may cause a deadlock
return proto.Marshal(client.GetCurrentBlock())
}
func (client *RaftClient) recoverFromSnapshot(snapshot []byte) error {
var block types.Block
if err := proto.Unmarshal(snapshot, &block); err != nil {
return err
}
client.SetCurrentBlock(&block)
return nil
}
func (client *RaftClient) SetQueueClient(c queue.Client) {
rlog.Info("Enter SetQueue method of raft consensus")
client.InitClient(c, func() {
})
go client.EventLoop()
go client.readCommits(client.commitC, client.errorC)
go client.pollingTask(c)
}
func (client *RaftClient) Close() {
client.stopC <- struct{}{}
rlog.Info("consensus raft closed")
}
func (client *RaftClient) CreateBlock() {
issleep := true
retry := 0
infoflag := 0
count := 0
// sync up to the max height before packing blocks
for {
if client.IsCaughtUp() {
rlog.Info("Leader has caught up the max height")
break
}
time.Sleep(time.Second)
retry++
if retry >= 600 {
panic("This node encounter problem, exit.")
}
}
for {
// if the leader suddenly goes down and this node is no longer the packing node, exit
if !isLeader {
rlog.Warn("I'm not the validator node anymore, exit.=============================")
break
}
infoflag++
if infoflag >= 3 {
rlog.Info("==================This is Leader node=====================")
infoflag = 0
}
if issleep {
time.Sleep(10 * time.Second)
count++
}
if count >= 12 {
rlog.Info("Create an empty block")
block := client.GetCurrentBlock()
emptyBlock := &types.Block{}
emptyBlock.StateHash = block.StateHash
emptyBlock.ParentHash = block.Hash()
emptyBlock.Height = block.Height + 1
emptyBlock.Txs = nil
emptyBlock.TxHash = zeroHash[:]
emptyBlock.BlockTime = types.Now().Unix()
entry := emptyBlock
client.propose(entry)
er := client.WriteBlock(block.StateHash, emptyBlock)
if er != nil {
rlog.Error(fmt.Sprintf("********************err:%v", er.Error()))
continue
}
client.SetCurrentBlock(emptyBlock)
count = 0
}
lastBlock := client.GetCurrentBlock()
txs := client.RequestTx(int(types.GetP(lastBlock.Height+1).MaxTxNumber), nil)
if len(txs) == 0 {
issleep = true
continue
}
issleep = false
count = 0
rlog.Debug("==================start create new block!=====================")
var newblock types.Block
newblock.ParentHash = lastBlock.Hash()
newblock.Height = lastBlock.Height + 1
client.AddTxsToBlock(&newblock, txs)
newblock.TxHash = merkle.CalcMerkleRoot(newblock.Txs)
newblock.BlockTime = types.Now().Unix()
if lastBlock.BlockTime >= newblock.BlockTime {
newblock.BlockTime = lastBlock.BlockTime + 1
}
blockEntry := newblock
client.propose(&blockEntry)
err := client.WriteBlock(lastBlock.StateHash, &newblock)
if err != nil {
issleep = true
rlog.Error(fmt.Sprintf("********************err:%v", err.Error()))
continue
}
time.Sleep(time.Second * time.Duration(writeBlockSeconds))
}
}
// send the block down to the raft layer
func (client *RaftClient) propose(block *types.Block) {
client.proposeC <- block
}
// read blocks sent by the leader from the commit channel
func (client *RaftClient) readCommits(commitC <-chan *types.Block, errorC <-chan error) {
var data *types.Block
var ok bool
for {
select {
case data, ok = <-commitC:
if !ok || data == nil {
continue
}
rlog.Debug("===============Get block from commit channel===========")
// data may be lost right after the program starts
// the block height is only changed by the base driver code, to prevent bad blocks
//client.SetCurrentBlock(data)
case err, ok := <-errorC:
if ok {
panic(err)
}
}
}
}
// polling task: check whether this machine is the validator node, and if so run the block-packing task
func (client *RaftClient) pollingTask(c queue.Client) {
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for {
select {
case value, ok := <-client.validatorC:
// each node initializes its block only once
client.once.Do(func() {
client.InitBlock()
})
if ok && !value {
rlog.Debug("================I'm not the validator node!=============")
isLeader = false
} else if ok && !isLeader && value {
isLeader = true
go client.CreateBlock()
} else if !ok {
break
}
case <-ticker.C:
rlog.Debug("Gets the leader node information timeout and triggers the ticker.")
}
}
}
Title="local"
[log]
# log level; debug(dbug)/info/warn/error(eror)/crit are supported
loglevel = "debug"
logConsoleLevel = "info"
# log file name, may include a directory; all generated log files are put under that directory
logFile = "logs/chain33.log"
# maximum size of a single log file (unit: MB)
maxFileSize = 300
# maximum number of rotated log files to keep
maxBackups = 100
# maximum age of rotated log files to keep (unit: days)
maxAge = 28
# whether log file names use local time (otherwise UTC)
localTime = true
# whether rotated log files are compressed (gz format)
compress = true
# whether to print the calling source file and line number
callerFile = false
# whether to print the calling function
callerFunction = false
[blockchain]
defCacheSize=512
maxFetchBlockNum=128
timeoutSeconds=5
batchBlockNum=128
driver="leveldb"
dbPath="datadir"
dbCache=64
isStrongConsistency=true
singleMode=true
batchsync=false
enableTxQuickIndex=true
[p2p]
seeds=["127.0.0.1:13802"]
enable=true
isSeed=true
serverStart=true
innerSeedEnable=false
useGithub=false
innerBounds=300
msgCacheSize=10240
driver="leveldb"
dbPath="datadir/addrbook"
dbCache=4
grpcLogFile="grpc33.log"
version=15
verMix=15
verMax=16
[rpc]
jrpcBindAddr="localhost:8801"
grpcBindAddr="localhost:8802"
whitelist=["127.0.0.1"]
jrpcFuncWhitelist=["*"]
grpcFuncWhitelist=["*"]
[mempool]
poolCacheSize=10240
minTxFee=100000
[consensus]
# consensus driver name; solo/raft/ticket/tendermint/pbft are supported
name="raft"
minerstart=false
[mver.consensus]
fundKeyAddr = "1BQXS6TxaYYG5mADaWij4AxhZZUTpw95a5"
coinReward = 18
coinDevFund = 12
ticketPrice = 10000
powLimitBits = "0x1f00ffff"
retargetAdjustmentFactor = 4
futureBlockTime = 16
ticketFrozenTime = 5 #5s only for test
ticketWithdrawTime = 10 #10s only for test
ticketMinerWaitTime = 2 #2s only for test
maxTxNumber = 1600 #160
targetTimespan = 2304
targetTimePerBlock = 16
[mver.consensus.ForkChainParamV1]
maxTxNumber = 10000
targetTimespan = 288 #only for test
targetTimePerBlock = 2
[mver.consensus.ForkChainParamV2]
powLimitBits = "0x1f2fffff"
[consensus.sub.raft]
genesis="14KEKbYtKKQm4wMthSK9J4La4nAiidGozt"
genesisBlockTime=1514533394
# =============== raft consensus configuration ===========================
# consensus node ID used by raft; each node must use a different nodeId (currently only values like 1,2,3 are supported)
nodeId=1
# used by raft; nodes are added and removed through this port
raftApiPort=9121
# used by raft; indicates whether this node is a newly added node
isNewJoinNode=false
# used by raft; the server IPs and ports of the raft cluster
peersURL="http://127.0.0.1:9021"
# used by raft; the IPs of the read-only nodes in the raft cluster (they only replicate the log and do not take part in raft consensus)
readOnlyPeersURL=""
addPeersURL=""
# used by raft; how many raft entries are packed into one snapshot by default
defaultSnapCount=1000
# used by raft; default interval for writing blocks in raft
writeBlockSeconds=1
# used by raft; default interval at which the raft leader sends heartbeats
heartbeatTick=1
# =============== raft consensus configuration ===========================
[store]
name="kvdb"
driver="leveldb"
dbPath="datadir/mavltree"
dbCache=128
[store.sub.kvdb]
enableMavlPrefix=false
enableMVCC=false
[wallet]
minFee=100000
driver="leveldb"
dbPath="wallet"
dbCache=16
signType="secp256k1"
[wallet.sub.ticket]
minerdisable=false
minerwhitelist=["*"]
[exec]
isFree=false
minExecFee=100000
enableStat=false
enableMVCC=false
alias=["token1:token","token2:token","token3:token"]
saveTokenTxList=false
[exec.sub.cert]
# whether certificate verification and signing are enabled
enable=false
# path to the crypto files
cryptoPath="authdir/crypto"
# certificate signature type; "auth_ecdsa" and "auth_sm2" are supported
signType="auth_ecdsa"
package raft
import (
"strings"
"github.com/coreos/etcd/raft/raftpb"
log "github.com/inconshreveable/log15"
"gitlab.33.cn/chain33/chain33/queue"
"gitlab.33.cn/chain33/chain33/types"
)
var (
rlog = log.New("module", "raft")
genesis string
genesisBlockTime int64
defaultSnapCount uint64 = 1000
snapshotCatchUpEntriesN uint64 = 1000
writeBlockSeconds int64 = 1
heartbeatTick int = 1
isLeader bool = false
confChangeC chan raftpb.ConfChange
)
type subConfig struct {
Genesis string `json:"genesis"`
GenesisBlockTime int64 `json:"genesisBlockTime"`
NodeId int64 `json:"nodeId"`
PeersURL string `json:"peersURL"`
RaftApiPort int64 `json:"raftApiPort"`
IsNewJoinNode bool `json:"isNewJoinNode"`
ReadOnlyPeersURL string `json:"readOnlyPeersURL"`
AddPeersURL string `json:"addPeersURL"`
DefaultSnapCount int64 `json:"defaultSnapCount"`
WriteBlockSeconds int64 `json:"writeBlockSeconds"`
HeartbeatTick int32 `json:"heartbeatTick"`
}
func NewRaftCluster(cfg *types.Consensus, sub []byte) queue.Module {
rlog.Info("Start to create raft cluster")
var subcfg subConfig
if sub != nil {
types.MustDecode(sub, &subcfg)
}
if subcfg.Genesis != "" {
genesis = subcfg.Genesis
}
if subcfg.GenesisBlockTime > 0 {
genesisBlockTime = subcfg.GenesisBlockTime
}
if int(subcfg.NodeId) == 0 || strings.Compare(subcfg.PeersURL, "") == 0 {
rlog.Error("Please check whether the configuration of nodeId and peersURL is empty!")
//TODO when the passed-in parameters are invalid, nil is returned to the caller and needs to be handled there
return nil
}
// by default a snapshot is taken every 1000 entries
if subcfg.DefaultSnapCount > 0 {
defaultSnapCount = uint64(subcfg.DefaultSnapCount)
snapshotCatchUpEntriesN = uint64(subcfg.DefaultSnapCount)
}
// write block interval in second
if subcfg.WriteBlockSeconds > 0 {
writeBlockSeconds = subcfg.WriteBlockSeconds
}
// raft leader sends heartbeat messages every HeartbeatTick ticks
if subcfg.HeartbeatTick > 0 {
heartbeatTick = int(subcfg.HeartbeatTick)
}
// propose channel
proposeC := make(chan *types.Block)
confChangeC = make(chan raftpb.ConfChange)
var b *RaftClient
getSnapshot := func() ([]byte, error) { return b.getSnapshot() }
// build the raft cluster: 1. initialize two channels: the propose channel is used by the client to talk to the raft layer, the commit channel delivers commit messages
// 2. set up the http connections between the nodes of the raft cluster
peers := strings.Split(subcfg.PeersURL, ",")
if len(peers) == 1 && peers[0] == "" {
peers = []string{}
}
readOnlyPeers := strings.Split(subcfg.ReadOnlyPeersURL, ",")
if len(readOnlyPeers) == 1 && readOnlyPeers[0] == "" {
readOnlyPeers = []string{}
}
addPeers := strings.Split(subcfg.AddPeersURL, ",")
if len(addPeers) == 1 && addPeers[0] == "" {
addPeers = []string{}
}
commitC, errorC, snapshotterReady, validatorC, stopC := NewRaftNode(int(subcfg.NodeId), subcfg.IsNewJoinNode, peers, readOnlyPeers, addPeers, getSnapshot, proposeC, confChangeC)
// start the http listener for raft node add/remove operations
go serveHttpRaftAPI(int(subcfg.RaftApiPort), confChangeC, errorC)
// listen on the commit channel and fetch blocks
b = NewBlockstore(cfg, <-snapshotterReady, proposeC, commitC, errorC, validatorC, stopC)
return b
}
package raft
import (
"io/ioutil"
"net/http"
"strconv"
"fmt"
"github.com/coreos/etcd/raft/raftpb"
)
// Handler for a http based httpRaftAPI backed by raft
type httpRaftAPI struct {
confChangeC chan<- raftpb.ConfChange
}
func (h *httpRaftAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
key := r.RequestURI
switch {
case r.Method == "POST":
url, err := ioutil.ReadAll(r.Body)
if err != nil {
rlog.Error(fmt.Sprintf("Failed to convert ID for conf change (%v)", err.Error()))
http.Error(w, "Failed on POST", http.StatusBadRequest)
return
}
nodeId, err := strconv.ParseUint(key[1:], 0, 64)
if err != nil {
rlog.Error(fmt.Sprintf("Failed to convert ID for conf change (%v)", err.Error()))
http.Error(w, "Failed on POST", http.StatusBadRequest)
return
}
cc := raftpb.ConfChange{
Type: raftpb.ConfChangeAddNode,
NodeID: nodeId,
Context: url,
}
h.confChangeC <- cc
// As above, optimistic that raft will apply the conf change
w.WriteHeader(http.StatusCreated)
case r.Method == "DELETE":
nodeId, err := strconv.ParseUint(key[1:], 0, 64)
if err != nil {
rlog.Error(fmt.Sprintf("Failed to convert ID for conf change (%v)", err.Error()))
http.Error(w, "Failed on DELETE", http.StatusBadRequest)
return
}
cc := raftpb.ConfChange{
Type: raftpb.ConfChangeRemoveNode,
NodeID: nodeId,
}
h.confChangeC <- cc
// As above, optimistic that raft will apply the conf change
w.WriteHeader(http.StatusAccepted)
default:
w.Header().Add("Allow", "POST")
w.Header().Add("Allow", "DELETE")
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
}
}
func serveHttpRaftAPI(port int, confChangeC chan<- raftpb.ConfChange, errorC <-chan error) {
srv := http.Server{
Addr: "localhost:" + strconv.Itoa(port),
Handler: &httpRaftAPI{
confChangeC: confChangeC,
},
}
go func() {
if err := srv.ListenAndServe(); err != nil {
rlog.Error(fmt.Sprintf("ListenAndServe have a err: (%v)", err.Error()))
}
}()
// exit when raft goes down
if err, ok := <-errorC; ok {
rlog.Error(fmt.Sprintf("the errorC chan receive a err (%v)\n", err.Error()))
}
}
package raft
import (
"errors"
"net"
"time"
)
// sets the TCP keep-alive timeout and receives from stopc
type stoppableListener struct {
*net.TCPListener
stopc <-chan struct{}
}
// listen for tcp connections
func newStoppableListener(addr string, stopc <-chan struct{}) (*stoppableListener, error) {
ln, err := net.Listen("tcp", addr)
if err != nil {
return nil, err
}
return &stoppableListener{ln.(*net.TCPListener), stopc}, nil
}
func (ln stoppableListener) Accept() (c net.Conn, err error) {
connc := make(chan *net.TCPConn, 1)
errc := make(chan error, 1)
go func() {
tc, err := ln.AcceptTCP()
if err != nil {
errc <- err
return
}
connc <- tc
}()
select {
case <-ln.stopc:
return nil, errors.New("server stopped")
case err := <-errc:
return nil, err
case tc := <-connc:
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute)
return tc, nil
}
}
package raft
import (
"encoding/binary"
"flag"
"fmt"
"math/rand"
"os"
"testing"
"time"
"gitlab.33.cn/chain33/chain33/blockchain"
"gitlab.33.cn/chain33/chain33/common"
"gitlab.33.cn/chain33/chain33/common/address"
"gitlab.33.cn/chain33/chain33/common/crypto"
"gitlab.33.cn/chain33/chain33/common/limits"
"gitlab.33.cn/chain33/chain33/common/log"
"gitlab.33.cn/chain33/chain33/executor"
"gitlab.33.cn/chain33/chain33/mempool"
"gitlab.33.cn/chain33/chain33/p2p"
"gitlab.33.cn/chain33/chain33/queue"
"gitlab.33.cn/chain33/chain33/store"
"gitlab.33.cn/chain33/chain33/types"
_ "gitlab.33.cn/chain33/chain33/plugin/dapp/init"
pty "gitlab.33.cn/chain33/chain33/plugin/dapp/norm/types"
_ "gitlab.33.cn/chain33/chain33/plugin/store/init"
_ "gitlab.33.cn/chain33/chain33/system"
)
var (
random *rand.Rand
txNumber int = 10
loopCount int = 10
)
func init() {
err := limits.SetLimits()
if err != nil {
panic(err)
}
random = rand.New(rand.NewSource(types.Now().UnixNano()))
log.SetLogLevel("info")
}
func TestRaftPerf(t *testing.T) {
RaftPerf()
fmt.Println("=======start clear test data!=======")
clearTestData()
}
func RaftPerf() {
q, chain, s, mem, exec, cs, p2p := initEnvRaft()
defer chain.Close()
defer mem.Close()
defer exec.Close()
defer s.Close()
defer q.Close()
defer cs.Close()
defer p2p.Close()
sendReplyList(q)
}
func initEnvRaft() (queue.Queue, *blockchain.BlockChain, queue.Module, *mempool.Mempool, queue.Module, queue.Module, queue.Module) {
var q = queue.New("channel")
flag.Parse()
cfg, sub := types.InitCfg("chain33.test.toml")
types.Init(cfg.Title, cfg)
chain := blockchain.New(cfg.BlockChain)
chain.SetQueueClient(q.Client())
exec := executor.New(cfg.Exec, sub.Exec)
exec.SetQueueClient(q.Client())
types.SetMinFee(0)
s := store.New(cfg.Store, sub.Store)
s.SetQueueClient(q.Client())
cs := NewRaftCluster(cfg.Consensus, sub.Consensus["raft"])
cs.SetQueueClient(q.Client())
mem := mempool.New(cfg.MemPool)
mem.SetQueueClient(q.Client())
network := p2p.New(cfg.P2P)
network.SetQueueClient(q.Client())
return q, chain, s, mem, exec, cs, network
}
func generateKey(i, valI int) string {
key := make([]byte, valI)
binary.PutUvarint(key[:10], uint64(valI))
binary.PutUvarint(key[12:24], uint64(i))
if _, err := rand.Read(key[24:]); err != nil {
os.Exit(1)
}
return string(key)
}
func generateValue(i, valI int) string {
value := make([]byte, valI)
binary.PutUvarint(value[:16], uint64(i))
binary.PutUvarint(value[32:128], uint64(i))
if _, err := rand.Read(value[128:]); err != nil {
os.Exit(1)
}
return string(value)
}
func getprivkey(key string) crypto.PrivKey {
cr, err := crypto.New(types.GetSignName("", types.SECP256K1))
if err != nil {
panic(err)
}
bkey, err := common.FromHex(key)
if err != nil {
panic(err)
}
priv, err := cr.PrivKeyFromBytes(bkey)
if err != nil {
panic(err)
}
return priv
}
func sendReplyList(q queue.Queue) {
client := q.Client()
client.Sub("mempool")
var count int
for msg := range client.Recv() {
if msg.Ty == types.EventTxList {
count++
msg.Reply(client.NewMessage("consensus", types.EventReplyTxList,
&types.ReplyTxList{getReplyList(txNumber)}))
if count >= loopCount {
time.Sleep(4 * time.Second)
break
}
}
}
}
func prepareTxList() *types.Transaction {
var key string
var value string
var i int
key = generateKey(i, 32)
value = generateValue(i, 180)
nput := &pty.NormAction_Nput{&pty.NormPut{Key: key, Value: []byte(value)}}
action := &pty.NormAction{Value: nput, Ty: pty.NormActionPut}
tx := &types.Transaction{Execer: []byte("norm"), Payload: types.Encode(action), Fee: 0}
tx.To = address.ExecAddress("norm")
tx.Nonce = random.Int63()
tx.Sign(types.SECP256K1, getprivkey("CC38546E9E659D15E6B4893F0AB32A06D103931A8230B0BDE71459D2B27D6944"))
return tx
}
func getReplyList(n int) (txs []*types.Transaction) {
for i := 0; i < int(n); i++ {
txs = append(txs, prepareTxList())
}
return txs
}
func clearTestData() {
err := os.RemoveAll("datadir")
if err != nil {
fmt.Println("delete datadir have a err:", err.Error())
}
err = os.RemoveAll("chain33_raft-1")
if err != nil {
fmt.Println("delete chain33_raft dir have a err:", err.Error())
}
fmt.Println("test data clear sucessfully!")
}
raftPerf
scripts/chain33
scripts/chain33_raft-1/
package main
import (
"flag"
"fmt"
"log"
"path/filepath"
"strconv"
"github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/pbutil"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft/raftpb"
raftsnap "github.com/coreos/etcd/snap"
"github.com/coreos/etcd/wal"
"github.com/coreos/etcd/wal/walpb"
"github.com/golang/protobuf/proto"
)
func main() {
snapfile := flag.String("start-snap", "", "The base name of snapshot file to start dumping")
index := flag.String("start-index", "", "The index to start dumping")
flag.Parse()
if len(flag.Args()) != 1 {
log.Fatalf("Must provide data-dir argument (got %+v)", flag.Args())
}
dataDir := flag.Args()[0]
if *snapfile != "" && *index != "" {
log.Fatal("start-snap and start-index flags cannot be used together.")
}
var (
walsnap walpb.Snapshot
snapshot *raftpb.Snapshot
err error
)
isIndex := *index != ""
if isIndex {
fmt.Printf("Start dumping log entries from index %s.\n", *index)
walsnap.Index, err = strconv.ParseUint(*index, 10, 64)
} else {
if *snapfile == "" {
ss := raftsnap.New(snapDir(dataDir))
snapshot, err = ss.Load()
} else {
snapshot, err = raftsnap.Read(filepath.Join(snapDir(dataDir), *snapfile))
}
switch err {
case nil:
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
nodes := genIDSlice(snapshot.Metadata.ConfState.Nodes)
fmt.Printf("Snapshot:\nterm=%d index=%d nodes=%s\n",
walsnap.Term, walsnap.Index, nodes)
case raftsnap.ErrNoSnapshot:
fmt.Printf("Snapshot:\nempty\n")
default:
log.Fatalf("Failed loading snapshot: %v", err)
}
fmt.Println("Start dupmping log entries from snapshot.")
}
w, err := wal.OpenForRead(walDir(dataDir), walsnap)
if err != nil {
log.Fatalf("Failed opening WAL: %v", err)
}
wmetadata, state, ents, err := w.ReadAll()
w.Close()
if err != nil && (!isIndex || err != wal.ErrSnapshotNotFound) {
log.Fatalf("Failed reading WAL: %v", err)
}
id, cid := parseWALMetadata(wmetadata)
vid := types.ID(state.Vote)
fmt.Printf("WAL metadata:\nnodeID=%s clusterID=%s term=%d commitIndex=%d vote=%s\n",
id, cid, state.Term, state.Commit, vid)
fmt.Printf("WAL entries:\n")
fmt.Printf("lastIndex=%d\n", ents[len(ents)-1].Index)
fmt.Printf("%4s\t%10s\ttype\tdata\n", "term", "index")
for _, e := range ents {
msg := fmt.Sprintf("%4d\t%10d", e.Term, e.Index)
switch e.Type {
case raftpb.EntryNormal:
msg = fmt.Sprintf("%s\tnorm", msg)
if len(e.Data) == 0 {
break
}
// decode
block := &Block{}
if err := proto.Unmarshal(e.Data, block); err != nil {
log.Printf("failed to unmarshal: %v", err)
break
}
msg = fmt.Sprintf("%s\t BlockHeight:%d", msg, block.Height)
case raftpb.EntryConfChange:
msg = fmt.Sprintf("%s\tconf", msg)
var r raftpb.ConfChange
if err := r.Unmarshal(e.Data); err != nil {
msg = fmt.Sprintf("%s\t???", msg)
} else {
msg = fmt.Sprintf("%s\tmethod=%s id=%s", msg, r.Type, types.ID(r.NodeID))
}
}
fmt.Println(msg)
}
}
func walDir(dataDir string) string { return filepath.Join(dataDir, "wal") }
func snapDir(dataDir string) string { return filepath.Join(dataDir, "snap") }
func parseWALMetadata(b []byte) (id, cid types.ID) {
var metadata etcdserverpb.Metadata
pbutil.MustUnmarshal(&metadata, b)
id = types.ID(metadata.NodeID)
cid = types.ID(metadata.ClusterID)
return id, cid
}
func genIDSlice(a []uint64) []types.ID {
ids := make([]types.ID, len(a))
for i, id := range a {
ids[i] = types.ID(id)
}
return ids
}
type Block struct {
Version int64 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"`
ParentHash []byte `protobuf:"bytes,2,opt,name=parentHash,proto3" json:"parentHash,omitempty"`
TxHash []byte `protobuf:"bytes,3,opt,name=txHash,proto3" json:"txHash,omitempty"`
StateHash []byte `protobuf:"bytes,4,opt,name=stateHash,proto3" json:"stateHash,omitempty"`
Height int64 `protobuf:"varint,5,opt,name=height" json:"height,omitempty"`
BlockTime int64 `protobuf:"varint,6,opt,name=blockTime" json:"blockTime,omitempty"`
//Signature *Signature `protobuf:"bytes,8,opt,name=signature" json:"signature,omitempty"`
//Txs []*Transaction `protobuf:"bytes,7,rep,name=txs" json:"txs,omitempty"`
}
func (m *Block) Reset() { *m = Block{} }
func (m *Block) String() string { return proto.CompactTextString(m) }
func (*Block) ProtoMessage() {}
package main
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"math/rand"
"os"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/proto"
"gitlab.33.cn/chain33/chain33/common"
"gitlab.33.cn/chain33/chain33/common/address"
"gitlab.33.cn/chain33/chain33/common/crypto"
rlog "gitlab.33.cn/chain33/chain33/common/log"
pty "gitlab.33.cn/chain33/chain33/plugin/dapp/norm/types"
"gitlab.33.cn/chain33/chain33/types"
"google.golang.org/grpc"
)
const fee = 1e6
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
var conn *grpc.ClientConn
var c types.Chain33Client
var r *rand.Rand
func createConn(ip string) {
var err error
url := ip + ":8802"
fmt.Println("grpc url:", url)
conn, err = grpc.Dial(url, grpc.WithInsecure())
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
c = types.NewChain33Client(conn)
r = rand.New(rand.NewSource(types.Now().UnixNano()))
}
func main() {
rlog.SetLogLevel("eror")
if len(os.Args) == 1 || os.Args[1] == "-h" {
LoadHelp()
return
}
createConn(os.Args[1])
argsWithoutProg := os.Args[2:]
switch argsWithoutProg[0] {
case "-h": //使用帮助
LoadHelp()
case "transferperf":
if len(argsWithoutProg) != 6 {
fmt.Print(errors.New("wrong number of arguments").Error())
return
}
TransferPerf(argsWithoutProg[1], argsWithoutProg[2], argsWithoutProg[3], argsWithoutProg[4], argsWithoutProg[5])
case "sendtoaddress":
if len(argsWithoutProg) != 5 {
fmt.Print(errors.New("wrong number of arguments").Error())
return
}
SendToAddress(argsWithoutProg[1], argsWithoutProg[2], argsWithoutProg[3], argsWithoutProg[4])
case "normperf":
if len(argsWithoutProg) != 5 {
fmt.Print(errors.New("wrong number of arguments").Error())
return
}
NormPerf(argsWithoutProg[1], argsWithoutProg[2], argsWithoutProg[3], argsWithoutProg[4])
case "normput":
if len(argsWithoutProg) != 4 {
fmt.Print(errors.New("wrong number of arguments").Error())
return
}
NormPut(argsWithoutProg[1], argsWithoutProg[2], argsWithoutProg[3])
case "normget":
if len(argsWithoutProg) != 2 {
fmt.Print(errors.New("wrong number of arguments").Error())
return
}
NormGet(argsWithoutProg[1])
//zzh
case "normreadperf":
if len(argsWithoutProg) != 4 {
fmt.Print(errors.New("wrong number of arguments").Error())
return
}
NormReadPerf(argsWithoutProg[1], argsWithoutProg[2], argsWithoutProg[3])
}
}
func LoadHelp() {
fmt.Println("Available Commands:")
fmt.Println("[ip] transferperf [from, to, amount, txNum, duration] : 转账性能测试")
fmt.Println("[ip] sendtoaddress [from, to, amount, note] : 发送交易到地址")
fmt.Println("[ip] normperf [size, num, interval, duration] : 常规写数据性能测试")
fmt.Println("[ip] normput [privkey, key, value] : 常规写数据")
fmt.Println("[ip] normget [key] : 常规读数据")
fmt.Println("[ip] normreadperf [num, interval, duration] : 常规读数据性能测试")
}
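// Example invocation (hypothetical binary name and values, following the help text above):
//   ./norm_bench 127.0.0.1 normperf 100 50 1 3600   (100-byte values, 50 txs per round, 1s interval, run for 3600s)
//   ./norm_bench 127.0.0.1 normget mykey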
func TransferPerf(from string, to string, amount string, txNum string, duration string) {
txNumInt, err := strconv.Atoi(txNum)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
durInt, err := strconv.Atoi(duration)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
ch := make(chan struct{}, txNumInt)
for i := 0; i < txNumInt; i++ {
go func() {
txs := 0
for {
SendToAddress(from, to, amount, "test")
txs++
if durInt != 0 && txs == durInt {
break
}
time.Sleep(time.Second)
}
ch <- struct{}{}
}()
}
for j := 0; j < txNumInt; j++ {
<-ch
}
}
func SendToAddress(from string, to string, amount string, note string) {
amountFloat64, err := strconv.ParseFloat(amount, 64)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
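// The CLI amount is scaled to the smallest on-chain unit in two 1e4 steps below
// (1e4 * 1e4 = 1e8), presumably to limit float rounding error on large amounts.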
amountInt64 := int64(amountFloat64 * 1e4)
tx := &types.ReqWalletSendToAddress{From: from, To: to, Amount: amountInt64 * 1e4, Note: note}
reply, err := c.SendToAddress(context.Background(), tx)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
data, err := json.MarshalIndent(reply, "", " ")
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
fmt.Println(string(data))
}
func NormPerf(size string, num string, interval string, duration string) {
var numThread int
sizeInt, err := strconv.Atoi(size)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
numInt, err := strconv.Atoi(num)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
intervalInt, err := strconv.Atoi(interval)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
durInt, err := strconv.Atoi(duration)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
if numInt < 10 {
numThread = 1
} else if numInt > 100 {
numThread = 10
} else {
numThread = numInt / 10
}
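// Worker heuristic above: roughly one goroutine per 10 requested txs, clamped to the
// range [1, 10]; each worker then sends numInt/numThread transactions per round.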
maxTxPerAcc := 50
ch := make(chan struct{}, numThread)
for i := 0; i < numThread; i++ {
go func() {
var result int64 = 0
totalCount := 0
txCount := 0
_, priv := genaddress()
for sec := 0; durInt == 0 || sec < durInt; {
start := time.Now()
for txs := 0; txs < numInt/numThread; txs++ {
if txCount >= maxTxPerAcc {
_, priv = genaddress()
txCount = 0
}
key := RandStringBytes(20)
value := RandStringBytes(sizeInt)
NormPut(common.ToHex(priv.Bytes()), key, value)
txCount++
totalCount++
}
end := time.Now()
result += end.Sub(start).Nanoseconds() / 1000000
time.Sleep(time.Second * time.Duration(intervalInt))
sec += intervalInt
}
fmt.Println("perform put ", totalCount, " times, cost time [ms]:", result)
ch <- struct{}{}
}()
}
for j := 0; j < numThread; j++ {
<-ch
}
}
//zzh
func NormReadPerf(num string, interval string, duration string) {
var numThread int
numInt, err := strconv.Atoi(num)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
intervalInt, err := strconv.Atoi(interval)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
durInt, err := strconv.Atoi(duration)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
if numInt < 10 {
numThread = 1
} else if numInt > 100 {
numThread = 10
} else {
numThread = numInt / 10
}
ch := make(chan struct{}, numThread)
for i := 0; i < numThread; i++ {
go func() {
f, err := os.Open("normperf.log")
if err != nil {
panic("open file failed.")
return
}
buf := bufio.NewReader(f)
cnt := 0
var result, totalTime int64
result = 0
totalTime = 0
totalCount := 0
for sec := 0; durInt == 0 || sec < durInt; {
start := time.Now()
for txs := 0; txs < numInt/numThread; txs++ {
line, err := buf.ReadString('\n')
if err != nil {
if err == io.EOF {
f, err := os.Open("normperf.log")
if err != nil {
panic("open file failed.")
return
}
buf = bufio.NewReader(f)
}
continue
}
line = strings.Replace(line, " ", "", -1)
index := strings.IndexAny(line, "=")
if index > 0 {
prefix := []byte(line)[0:index]
NormGet(string(prefix))
cnt++
totalCount++
} else {
continue
}
}
end := time.Now()
result += end.Sub(start).Nanoseconds() / 1000000
if cnt > 100 {
fmt.Println("normal get ", cnt, "times, cost time [ms]:", result)
totalTime += result
cnt = 0
result = 0
}
time.Sleep(time.Second * time.Duration(intervalInt))
sec += intervalInt
}
totalTime += result
fmt.Println("perform total get ", totalCount, " times, cost total time [ms]:", totalTime)
ch <- struct{}{}
}()
}
for j := 0; j < numThread; j++ {
<-ch
}
}
func RandStringBytes(n int) string {
b := make([]byte, n)
rand.Seed(types.Now().UnixNano())
for i := range b {
b[i] = letterBytes[rand.Intn(len(letterBytes))]
}
return string(b)
}
func NormPut(privkey string, key string, value string) {
fmt.Println(key, "=", value)
nput := &pty.NormAction_Nput{&pty.NormPut{Key: key, Value: []byte(value)}}
action := &pty.NormAction{Value: nput, Ty: pty.NormActionPut}
tx := &types.Transaction{Execer: []byte("norm"), Payload: types.Encode(action), Fee: fee}
tx.To = address.ExecAddress("norm")
tx.Nonce = r.Int63()
tx.Sign(types.SECP256K1, getprivkey(privkey))
reply, err := c.SendTransaction(context.Background(), tx)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
if !reply.IsOk {
fmt.Fprintln(os.Stderr, errors.New(string(reply.GetMsg())))
return
}
}
func NormGet(key string) {
in := &pty.NormGetKey{Key: key}
data, err := proto.Marshal(in)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
var req types.ChainExecutor
req.Driver = "norm"
req.FuncName = "NormGet"
req.Param = data
reply, err := c.QueryChain(context.Background(), &req)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
if !reply.IsOk {
fmt.Fprintln(os.Stderr, errors.New(string(reply.GetMsg())))
return
}
//the first two byte is not valid
//QueryChain() need to change
//value := string(reply.Msg[2:])
//fmt.Println("GetValue =", value)
}
func getprivkey(key string) crypto.PrivKey {
cr, err := crypto.New(types.GetSignName("", types.SECP256K1))
if err != nil {
panic(err)
}
bkey, err := common.FromHex(key)
if err != nil {
panic(err)
}
priv, err := cr.PrivKeyFromBytes(bkey)
if err != nil {
panic(err)
}
return priv
}
func genaddress() (string, crypto.PrivKey) {
cr, err := crypto.New(types.GetSignName("", types.SECP256K1))
if err != nil {
panic(err)
}
privto, err := cr.GenKey()
if err != nil {
panic(err)
}
addrto := address.PubKeyToAddress(privto.PubKey().Bytes())
fmt.Println("addr:", addrto.String())
return addrto.String(), privto
}
#!/usr/bin/env bash
#This is a build script for compiling and packaging chain33
echo "-----start build chain33-----"
SHELL_FOLDER=$(
cd "$(dirname "$0")" || exit 1
pwd
)
echo "cur dir:$SHELL_FOLDER"
cd "$SHELL_FOLDER"/../../../../../cmd/chain33/ || exit 1
echo "---go build -o chain33---"
go build -o chain33
mv chain33 "$SHELL_FOLDER"
curDir=$(pwd)
echo "cur dir:$curDir"
cd "$SHELL_FOLDER" || exit 1
#dos2unix *.sh
tar cvf chain33.tgz chain33 chain33.toml raft_conf.sh run.sh
rm -rf chain33
echo "---- chain33 build success!----- "
Title="chain33"
TestNet=false
[log]
# Log level; supports debug(dbug)/info/warn/error(eror)/crit
loglevel = "debug"
logConsoleLevel = "info"
# Log file name; a directory may be included, and all generated log files go under it
logFile = "logs/chain33.log"
# Maximum size of a single log file (unit: MB)
maxFileSize = 300
# Maximum number of rotated log files to keep
maxBackups = 100
# Maximum number of days to keep rotated log files
maxAge = 28
# Whether log file names use local time (otherwise UTC)
localTime = true
# Whether rotated log files are compressed (gz format)
compress = true
# Whether to print the calling source file and line number
callerFile = false
# Whether to print the calling function
callerFunction = false
[blockchain]
defCacheSize=512
maxFetchBlockNum=128
timeoutSeconds=5
batchBlockNum=128
driver="leveldb"
dbPath="datadir"
dbCache=64
isStrongConsistency=true
singleMode=true
batchsync=false
[p2p]
seeds=["114.55.149.144:13802","139.224.19.175:13802","139.224.82.165:13802"]
enable=true
isSeed=true
serverStart=true
innerSeedEnable=false
useGithub=false
innerBounds=300
msgCacheSize=10240
driver="leveldb"
dbPath="datadir/addrbook"
dbCache=4
grpcLogFile="grpc33.log"
version=15
verMix=15
verMax=16
[rpc]
jrpcBindAddr="localhost:8801"
grpcBindAddr="localhost:8802"
whitlist=["127.0.0.1"]
[mempool]
poolCacheSize=10240
minTxFee=100000
[consensus]
# Consensus driver name; supports solo/raft/ticket/tendermint/pbft
name="raft"
minerstart=true
genesis="14KEKbYtKKQm4wMthSK9J4La4nAiidGozt"
# =============== raft consensus parameters ===========================
# Consensus node ID used by raft; each node must set a different nodeId (currently only values like 1, 2, 3 are supported)
nodeId=1
# Used by raft; nodes are added and removed through this port
raftApiPort=9121
# Used by raft; indicates whether this node is a newly added node
isNewJoinNode=false
# Used by raft; IPs and ports of the servers in the raft cluster
peersURL="http://114.55.149.144:9021,http://139.224.19.175:9021,http://139.224.82.165:9021"
# Used by raft; IPs of read-only nodes in the raft cluster (they only replicate the log and do not take part in raft consensus)
readOnlyPeersURL=""
addPeersURL=""
#Used by raft; how many raft log entries are packed into one snapshot by default
defaultSnapCount=1000
# =============== raft consensus parameters ===========================
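# Example (assumed mapping, matching the order of peersURL above): the 114.55.149.144
# host keeps nodeId=1, the 139.224.19.175 host uses nodeId=2, the 139.224.82.165 host
# uses nodeId=3, and all three share the same peersURL.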
genesisBlockTime=1514533394
hotkeyAddr="12qyocayNF7Lv6C9qW4avxs2E7U41fKSfv"
[store]
name="kvdb"
driver="leveldb"
dbPath="datadir/kvdb"
dbCache=128
[wallet]
minFee=100000
driver="leveldb"
dbPath="datadir/wallet"
dbCache=16
signType="secp256k1"
minerdisable=true
[exec]
isFree=true
minExecFee=0
#!/usr/bin/env bash
#This is a script for distributing and deploying chain33
#Program:
#    This is a chain33 deploy script!
SHELL_FOLDER=$(
cd "$(dirname "$0")" || exit 1
pwd
)
echo "curl dir:$SHELL_FOLDER"
if [ "$1" == "start" ]; then
cd "$SHELL_FOLDER"/go-scp/ || exit 1
go build -o go_scp
cp go_scp servers.toml ../
rm -rf go_scp
cd "$SHELL_FOLDER" || exit 1
./go_scp start all
#rm -rf go_scp
#rm -rf servers.toml
rm -rf chain33.tgz
elif [ "$1" == "stop" ]; then
./go_scp stop all
elif [ "$1" == "clear" ]; then
./go_scp clear all
else
echo "Usage: ./raft_deploy.sh [start,stop,clear]"
fi
package main
import (
"fmt"
"log"
"net"
"os"
"path"
"time"
"io"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
//"io/ioutil"
"errors"
"flag"
tml "github.com/BurntSushi/toml"
)
var configPath = flag.String("f", "servers.toml", "configfile")
type ScpInfo struct {
UserName string
PassWord string
HostIp string
Port int
LocalFilePath string
RemoteDir string
}
type CmdInfo struct {
userName string
passWord string
hostIp string
port int
cmd string
remoteDir string
}
type tomlConfig struct {
Title string
Servers map[string]ScpInfo
}
func sshconnect(user, password, host string, port int) (*ssh.Session, error) {
var (
auth []ssh.AuthMethod
addr string
clientConfig *ssh.ClientConfig
client *ssh.Client
session *ssh.Session
err error
)
// get auth method
auth = make([]ssh.AuthMethod, 0)
auth = append(auth, ssh.Password(password))
clientConfig = &ssh.ClientConfig{
User: user,
Auth: auth,
Timeout: 30 * time.Second,
//the server host key should be verified; returning nil here skips that verification
HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
return nil
},
}
// connect to ssh
addr = fmt.Sprintf("%s:%d", host, port)
if client, err = ssh.Dial("tcp", addr, clientConfig); err != nil {
return nil, err
}
// create session
if session, err = client.NewSession(); err != nil {
return nil, err
}
return session, nil
}
func sftpconnect(user, password, host string, port int) (*sftp.Client, error) {
var (
auth []ssh.AuthMethod
addr string
clientConfig *ssh.ClientConfig
sshClient *ssh.Client
sftpClient *sftp.Client
err error
)
// get auth method
auth = make([]ssh.AuthMethod, 0)
auth = append(auth, ssh.Password(password))
clientConfig = &ssh.ClientConfig{
User: user,
Auth: auth,
Timeout: 30 * time.Second,
//the server host key should be verified; returning nil here skips that verification
HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
return nil
},
}
// connect to ssh
addr = fmt.Sprintf("%s:%d", host, port)
if sshClient, err = ssh.Dial("tcp", addr, clientConfig); err != nil {
return nil, err
}
// create sftp client
if sftpClient, err = sftp.NewClient(sshClient); err != nil {
return nil, err
}
return sftpClient, nil
}
func ScpFileFromLocalToRemote(si *ScpInfo) {
sftpClient, err := sftpconnect(si.UserName, si.PassWord, si.HostIp, si.Port)
if err != nil {
fmt.Println("sftconnect have a err!")
log.Fatal(err)
}
defer sftpClient.Close()
srcFile, err := os.Open(si.LocalFilePath)
if err != nil {
log.Fatal(err)
}
defer srcFile.Close()
var remoteFileName = path.Base(si.LocalFilePath)
fmt.Println("remoteFileName:", remoteFileName)
dstFile, err := sftpClient.Create(path.Join(si.RemoteDir, remoteFileName))
if err != nil {
log.Fatal(err)
}
defer dstFile.Close()
//bufReader := bufio.NewReader(srcFile)
//b := bytes.NewBuffer(make([]byte,0))
buf := make([]byte, 1024000)
for {
//n, err := bufReader.Read(buf)
n, err := srcFile.Read(buf)
if err != nil && err != io.EOF {
panic(err)
}
if n == 0 {
break
}
dstFile.Write(buf[0:n])
}
fmt.Println("copy file to remote server finished!")
}
func RemoteExec(cmdInfo *CmdInfo) error {
//A Session only accepts one call to Run, Start or Shell.
session, err := sshconnect(cmdInfo.userName, cmdInfo.passWord, cmdInfo.hostIp, cmdInfo.port)
if err != nil {
return err
}
defer session.Close()
session.Stdout = os.Stdout
session.Stderr = os.Stderr
err = session.Run(cmdInfo.cmd)
return err
}
func remoteScp(si *ScpInfo, reqnum chan struct{}) {
defer func() {
reqnum <- struct{}{}
}()
ScpFileFromLocalToRemote(si)
//session, err := sshconnect("ubuntu", "Fuzamei#123456", "raft15258.chinacloudapp.cn", 22)
fmt.Println("remoteScp file sucessfully!:")
}
func InitCfg(path string) *tomlConfig {
var cfg tomlConfig
if _, err := tml.DecodeFile(path, &cfg); err != nil {
fmt.Println(err)
os.Exit(0)
}
return &cfg
}
func main() {
conf := InitCfg(*configPath)
start := time.Now()
if len(os.Args) == 1 || os.Args[1] == "-h" {
LoadHelp()
return
}
argsWithoutProg := os.Args[1:]
switch argsWithoutProg[0] {
case "-h": //使用帮助
LoadHelp()
case "start":
if len(argsWithoutProg) != 2 {
fmt.Print(errors.New("参数错误").Error())
return
}
if argsWithoutProg[1] == "all" {
startAll(conf)
}
case "stop":
if len(argsWithoutProg) != 2 {
fmt.Print(errors.New("参数错误").Error())
return
}
if argsWithoutProg[1] == "all" {
stopAll(conf)
}
case "clear":
if len(argsWithoutProg) != 2 {
fmt.Print(errors.New("参数错误").Error())
return
}
if argsWithoutProg[1] == "all" {
clearAll(conf)
}
}
////read the files in the current directory
//dir_list, e := ioutil.ReadDir("D:/Repository/src/gitlab.33.cn/chain33/chain33/consensus/drivers/raft/tools/scripts")
//if e != nil {
// fmt.Println("read dir error")
// return
//}
//for i, v := range dir_list {
// fmt.Println(i, "=", v.Name())
//}
timeCommon := time.Now()
log.Printf("read common cost time %v\n", timeCommon.Sub(start))
}
func LoadHelp() {
fmt.Println("Available Commands:")
fmt.Println(" start : 启动服务 ")
fmt.Println(" stop : 停止服务")
fmt.Println(" clear : 清空数据")
}
func startAll(conf *tomlConfig) {
//fmt.Println(getCurrentDirectory())
arrMap := make(map[string]*CmdInfo)
//deploy to all servers concurrently with goroutines
reqC := make(chan struct{}, len(conf.Servers))
for index, sc := range conf.Servers {
cmdInfo := &CmdInfo{}
cmdInfo.hostIp = sc.HostIp
cmdInfo.userName = sc.UserName
cmdInfo.port = sc.Port
cmdInfo.passWord = sc.PassWord
cmdInfo.cmd = fmt.Sprintf("mkdir -p %s", sc.RemoteDir)
cmdInfo.remoteDir = sc.RemoteDir
RemoteExec(cmdInfo)
go remoteScp(&sc, reqC)
arrMap[index] = cmdInfo
}
for i := 0; i < len(conf.Servers); i++ {
<-reqC
}
for i, cmdInfo := range arrMap {
cmdInfo.cmd = fmt.Sprintf("cd %s;tar -xvf chain33.tgz;bash raft_conf.sh %s;bash run.sh start", cmdInfo.remoteDir, i)
RemoteExec(cmdInfo)
}
}
func stopAll(conf *tomlConfig) {
//these commands return quickly, so no extra goroutines are needed
for _, sc := range conf.Servers {
cmdInfo := &CmdInfo{}
cmdInfo.hostIp = sc.HostIp
cmdInfo.userName = sc.UserName
cmdInfo.port = sc.Port
cmdInfo.passWord = sc.PassWord
cmdInfo.cmd = fmt.Sprintf("cd %s;bash run.sh stop", sc.RemoteDir)
cmdInfo.remoteDir = sc.RemoteDir
RemoteExec(cmdInfo)
}
}
func clearAll(conf *tomlConfig) {
for _, sc := range conf.Servers {
cmdInfo := &CmdInfo{}
cmdInfo.hostIp = sc.HostIp
cmdInfo.userName = sc.UserName
cmdInfo.port = sc.Port
cmdInfo.passWord = sc.PassWord
cmdInfo.cmd = fmt.Sprintf("cd %s;bash run.sh clear", sc.RemoteDir)
cmdInfo.remoteDir = sc.RemoteDir
RemoteExec(cmdInfo)
}
}
title = "raft"
[servers]
#configure each server below as needed
[servers.1]
userName="ubuntu"
hostIp="raft15258.chinacloudapp.cn"
passWord="Fuzamei#123456"
port=22
localFilePath="chain33.tgz"
remoteDir="/home/ubuntu/deploy"
# [servers.2]
# userName="ubuntu"
# hostIp="raft15258.chinacloudapp.cn22222"
# passWord="Fuzamei#123456"
# port=22
# localFilePath=""
# remoteDir=""
# [servers.3]
# userName="ubuntu"
# hostIp="raft15258.chinacloudapp.cn22222"
# passWord="Fuzamei#123456"
# port=22
# localFilePath=""
# remoteDir=""
\ No newline at end of file
#!/bin/bash
cmd=$(sed -n '/^[# ]*\[.*\][ ]*/p' servers.conf)
fileName="servers.conf"
serverStr="servers."
getSections() {
sections="$cmd"
}
getInfoByIndex() {
index=$1
nextIndex=$((index + 1))
info=$(cat <"$fileName" | sed -n "/^[# ]*\\[servers.${index}/,/^[# ]*\\[servers.${nextIndex}/p")
}
getInfoByIndexAndKey() {
index=$1
key=$2
info=$(cat <"$fileName" | sed -n "/^[# ]*\\[servers.${index}/,/^[# ]*\\[servers.${nextIndex}/p" | grep -i "$key" | awk -F '=' '{print $2}')
}
main() {
getSections
for line in $sections; do
if [[ $line =~ $serverStr ]]; then
index=$(echo "$line" | awk -F '.' '{print $2}' | awk -F ']' '{print$1}')
getInfoByIndexAndKey "$index" "userName"
echo "servers.$index: userName->$info"
getInfoByIndexAndKey "$index" "hostIp"
echo "servers.$index: hostIp->$info"
getInfoByIndexAndKey "$index" "port"
echo "servers.$index: port->$info"
fi
done
}
main
[servers]
#configure each server below as needed
[servers.1]
userName=root
hostIp=114.55.149.144
passWord=Fuzamei#123456
pemFilePath=yiliaolian.pem
port=22
localFilePath=chain33.tgz
remoteDir=/root/deploy
[servers.2]
userName=root
hostIp=139.224.19.175
passWord=Fuzamei#123456
pemFilePath=yiliaolian.pem
port=22
localFilePath=chain33.tgz
remoteDir=/root/deploy
[servers.3]
userName=root
hostIp=139.224.82.165
passWord=Fuzamei#123456
pemFilePath=yiliaolian.pem
port=22
localFilePath=chain33.tgz
remoteDir=/root/deploy
\ No newline at end of file
#!/usr/bin/env bash
#This is a script for modifying the configuration file
nodeId=$1
function echo_green() {
echo -e "\\033[32m$1\\033[0m"
}
function main() {
sed -i "s/singleMode=true/singleMode=true/g" chain33.toml
sed -i "s/nodeId=1/nodeId=$nodeId/g" chain33.toml
}
main
echo_green "修改完成"
#!/usr/bin/env bash
# shellcheck disable=SC2029
########################################################################################################################
##########################chain33 auto deploy script###########################################################################
########################################################################################################################
##############################parse the config file#######################################################
pemFile=$1
cmd=$(sed -n '/^[# ]*\[.*\][ ]*/p' servers.conf)
fileName="servers.conf"
serverStr="servers."
getSections() {
sections=$cmd
}
getInfoByIndex() {
index=$1
nextIndex=$((index + 1))
info=$(cat <"$fileName" | sed -n "/^[# ]*\\[servers.${index}/,/^[# ]*\\[servers.${nextIndex}/p")
}
getInfoByIndexAndKey() {
index=$1
key=$2
info=$(cat <"$fileName" | sed -n "/^[# ]*\\[servers.${index}/,/^[# ]*\\[servers.${nextIndex}/p" | grep -i "$key" | awk -F '=' '{print $2}')
}
main() {
getSections
for line in $sections; do
if [[ $line =~ $serverStr ]]; then
index=$(echo "$line" | awk -F '.' '{print $2}' | awk -F ']' '{print$1}')
getInfoByIndexAndKey "$index" "userName"
echo "servers.$index: userName->$info"
getInfoByIndexAndKey "$index" "hostIp"
echo "servers.$index: hostIp->$info"
getInfoByIndexAndKey "$index" "port"
echo "servers.$index: port->$info"
fi
done
}
############################copy files from the local machine to the remote hosts#####################################################################
scpFileFromLocal() {
hostIP=$1
echo "hostIp:$hostIP"
port=$2
echo "port:$port"
userName=$3
echo "userName:$userName"
pemFile=$4
echo "pemFile:$pemFile"
scpFile=$5
deployDir=$6
ssh -i "$pemFile" -p "$port" "$userName"@"$hostIP" "mkdir -p $deployDir"
echo "scp -i $pemFile -P $port $scpFile $userName@$hostIP:$deployDir"
scp -i "$pemFile" -P "$port" "$scpFile" "$userName"@"$hostIP":"$deployDir"
}
####################################unpack and start chain33#################################################################
startChain33() {
hostIP=$1
port=$2
userName=$3
pemFile=$4
deployDir=$5
nodeId=$6
ssh -i "$pemFile" -p "$port" "$userName"@"$hostIP" "cd $deployDir;tar -xvf chain33.tgz;bash raft_conf.sh $nodeId;bash run.sh start"
echo done!
}
stopChain33() {
hostIP=$1
port=$2
userName=$3
pemFile=$4
deployDir=$5
nodeId=$6
ssh -i "$pemFile" -p "$port" "$userName"@"$hostIP" "cd $deployDir;bash run.sh stop"
echo done!
}
clearChain33() {
hostIP=$1
port=$2
userName=$3
pemFile=$4
deployDir=$5
ssh -i "$pemFile" -p "$port" "$userName"@"$hostIP" "cd $deployDir;bash run.sh clear"
echo done!
}
##########################################batch copy local files to multiple remote hosts####################################################################
batchScpFileFromLocal() {
getSections
for line in $sections; do
if [[ $line =~ $serverStr ]]; then
index=$(echo "$line" | awk -F '.' '{print $2}' | awk -F ']' '{print$1}')
getInfoByIndexAndKey "$index" "userName"
echo "servers.$index: userName->$info"
userName=$info
getInfoByIndexAndKey "$index" "hostIp"
echo "servers.$index: hostIp->$info"
hostIP=$info
getInfoByIndexAndKey "$index" "port"
echo "servers.$index: port->$info"
port=$info
getInfoByIndexAndKey "$index" "localFilePath"
echo "servers.$index: localFilePath->$info"
localFilePath=$info
getInfoByIndexAndKey "$index" "remoteDir"
echo "servers.$index: remoteDir->$info"
remoteDir=$info
scpFileFromLocal "$hostIP" "$port" "$userName" "$pemFile" "$localFilePath" "$remoteDir"
echo "the servers.$index:scp file successfully!"
fi
done
}
######################################batch unpack and start chain33#################################################################################
batchStartChain33() {
getSections
for line in $sections; do
if [[ $line =~ $serverStr ]]; then
index=$(echo "$line" | awk -F '.' '{print $2}' | awk -F ']' '{print$1}')
getInfoByIndexAndKey "$index" "userName"
echo "servers.$index: userName->$info"
userName=$info
getInfoByIndexAndKey "$index" "hostIp"
echo "servers.$index: hostIp->$info"
hostIP=$info
getInfoByIndexAndKey "$index" "port"
echo "servers.$index: port->$info"
port=$info
getInfoByIndexAndKey "$index" "localFilePath"
echo "servers.$index: localFilePath->$info"
localFilePath=$info
getInfoByIndexAndKey "$index" "port"
echo "servers.$index: remoteDir->$info"
remoteDir=$info
startChain33 "$hostIP" "$port" "$userName" "$pemFile" "$remoteDir" "$index"
echo "the servers.$index:start chain33 successfully!"
fi
done
}
######################################batch stop the chain33 services######################################################################################
batchStopChain33() {
getSections
for line in $sections; do
if [[ $line =~ $serverStr ]]; then
index=$(echo "$line" | awk -F '.' '{print $2}' | awk -F ']' '{print$1}')
getInfoByIndexAndKey "$index" "userName"
echo "servers.$index: userName->$info"
userName=$info
getInfoByIndexAndKey "$index" "hostIp"
echo "servers.$index: hostIp->$info"
hostIP=$info
getInfoByIndexAndKey "$index" "port"
echo "servers.$index: port->$info"
port=$info
getInfoByIndexAndKey "$index" "localFilePath"
echo "servers.$index: localFilePath->$info"
localFilePath=$info
getInfoByIndexAndKey "$index" "remoteDir"
echo "servers.$index: remoteDir->$info"
remoteDir=$info
stopChain33 "$hostIP" "$port" "$userName" "$pemFile" "$remoteDir"
echo "the servers.$index:stop chain33 successfully!"
fi
done
}
######################################batch clear chain33 data##################################################################################
batchClearChain33() {
getSections
for line in $sections; do
if [[ $line =~ $serverStr ]]; then
index=$(echo "$line" | awk -F '.' '{print $2}' | awk -F ']' '{print$1}')
getInfoByIndexAndKey "$index" "userName"
echo "servers.$index: userName->$info"
userName=$info
getInfoByIndexAndKey "$index" "hostIp"
echo "servers.$index: hostIp->$info"
hostIP=$info
getInfoByIndexAndKey "$index" "port"
echo "servers.$index: port->$info"
port=$info
getInfoByIndexAndKey "$index" "localFilePath"
echo "servers.$index: localFilePath->$info"
localFilePath=$info
getInfoByIndexAndKey "$index" "remoteDir"
echo "servers.$index: remoteDir->$info"
remoteDir=$info
clearChain33 "$hostIP" "$port" "$userName" "$pemFile" "$remoteDir"
echo "the servers.$index:clear chain33 data successfully!"
fi
done
}
######################################usage guide for this script##################################################################################
#Program:
#    This is a chain33 deploy script!
if [ "$2" == "start" ]; then
batchStartChain33
elif [ "$2" == "scp" ]; then
batchScpFileFromLocal
elif [ "$2" == "stop" ]; then
batchStopChain33
elif [ "$2" == "clear" ]; then
batchClearChain33
else
echo "Usage: ./raft_deploy.sh [pemFile:认证文件] [scp,start,stop,clear]"
fi
#!/usr/bin/env bash
#Program:
#    This is a chain33 deploy script!
if [ "$1" == "start" ]; then
nohup ./chain33 >console.log 2>&1 &
echo $! >chain33.pid
elif [ "$1" == "stop" ]; then
PID=$(cat chain33.pid)
kill -9 "$PID"
rm -rf chain33.pid
elif [ "$1" == "clear" ]; then
rm -rf console.log
rm -rf logs/
rm -rf grpc33.log
rm -rf chain33_raft*
rm -rf datadir/
else
echo "Usage: ./run.sh [start,stop,clear]"
fi
[servers]
#configure each server below as needed
[servers.1]
userName=root
hostIp=114.55.149.144
passWord=Fuzamei#123456
pemFilePath=yiliaolian.pem
port=22
localFilePath=chain33.tgz
remoteDir=/root/deploy
[servers.2]
userName=root
hostIp=139.224.19.175
passWord=Fuzamei#123456
pemFilePath=yiliaolian.pem
port=22
localFilePath=chain33.tgz
remoteDir=/root/deploy
[servers.3]
userName=root
hostIp=139.224.82.165
passWord=Fuzamei#123456
pemFilePath=yiliaolian.pem
port=22
localFilePath=chain33.tgz
remoteDir=/root/deploy
\ No newline at end of file
Title="local"
[log]
# Log level; supports debug(dbug)/info/warn/error(eror)/crit
loglevel = "debug"
logConsoleLevel = "info"
# Log file name; a directory may be included, and all generated log files go under it
logFile = "logs/chain33.log"
# Maximum size of a single log file (unit: MB)
maxFileSize = 300
# Maximum number of rotated log files to keep
maxBackups = 100
# Maximum number of days to keep rotated log files
maxAge = 28
# Whether log file names use local time (otherwise UTC)
localTime = true
# Whether rotated log files are compressed (gz format)
compress = true
# Whether to print the calling source file and line number
callerFile = false
# Whether to print the calling function
callerFunction = false
[blockchain]
defCacheSize=512
maxFetchBlockNum=128
timeoutSeconds=5
batchBlockNum=128
driver="leveldb"
dbPath="datadir"
dbCache=64
isStrongConsistency=true
singleMode=true
batchsync=false
enableTxQuickIndex=true
[p2p]
seeds=["127.0.0.1:13802"]
enable=true
isSeed=true
serverStart=true
innerSeedEnable=false
useGithub=false
innerBounds=300
msgCacheSize=10240
driver="leveldb"
dbPath="datadir/addrbook"
dbCache=4
grpcLogFile="grpc33.log"
version=199
verMix=199
verMax=199
[rpc]
jrpcBindAddr="localhost:8801"
grpcBindAddr="localhost:8802"
whitelist=["127.0.0.1"]
jrpcFuncWhitelist=["*"]
grpcFuncWhitelist=["*"]
[mempool]
poolCacheSize=10240
minTxFee=100000
[consensus]
name="tendermint"
minerstart=false
[mver.consensus]
fundKeyAddr = "1BQXS6TxaYYG5mADaWij4AxhZZUTpw95a5"
coinReward = 18
coinDevFund = 12
ticketPrice = 10000
powLimitBits = "0x1f00ffff"
retargetAdjustmentFactor = 4
futureBlockTime = 16
ticketFrozenTime = 5 #5s only for test
ticketWithdrawTime = 10 #10s only for test
ticketMinerWaitTime = 2 #2s only for test
maxTxNumber = 1600 #160
targetTimespan = 2304
targetTimePerBlock = 16
[mver.consensus.ForkChainParamV1]
maxTxNumber = 10000
targetTimespan = 288 #only for test
targetTimePerBlock = 2
[mver.consensus.ForkChainParamV2]
powLimitBits = "0x1f2fffff"
[consensus.sub.tendermint]
genesis="14KEKbYtKKQm4wMthSK9J4La4nAiidGozt"
genesisBlockTime=1514533394
timeoutTxAvail=1000
timeoutPropose=3000
timeoutProposeDelta=500
timeoutPrevote=1000
timeoutPrevoteDelta=500
timeoutPrecommit=1000
timeoutPrecommitDelta=500
timeoutCommit=1000
skipTimeoutCommit=false
createEmptyBlocks=false
createEmptyBlocksInterval=0
validatorNodes=["127.0.0.1:46656"]
[store]
name="kvdb"
driver="leveldb"
dbPath="datadir/mavltree"
dbCache=128
[store.sub.kvdb]
enableMavlPrefix=false
enableMVCC=false
[wallet]
minFee=100000
driver="leveldb"
dbPath="wallet"
dbCache=16
signType="secp256k1"
[wallet.sub.ticket]
minerdisable=false
minerwhitelist=["*"]
[exec]
isFree=false
minExecFee=100000
enableStat=false
enableMVCC=false
alias=["token1:token","token2:token","token3:token"]
saveTokenTxList=false
[exec.sub.cert]
# Whether to enable certificate verification and signing
enable=false
# Path to the crypto material
cryptoPath="authdir/crypto"
# Certificate-based signature type; supports "auth_ecdsa", "auth_sm2"
signType="auth_ecdsa"
This diff is collapsed.
This diff is collapsed.
package tendermint
import (
"bytes"
"errors"
"fmt"
ttypes "gitlab.33.cn/chain33/chain33/plugin/consensus/tendermint/types"
tmtypes "gitlab.33.cn/chain33/chain33/plugin/dapp/valnode/types"
)
//-----------------------------------------------------------------------------
// BlockExecutor handles block execution and state updates.
// It exposes ApplyBlock(), which validates & executes the block, updates state w/ ABCI responses,
// then commits and updates the mempool atomically, then saves state.
// BlockExecutor provides the context and accessories for properly executing a block.
type BlockExecutor struct {
// save state, validators, consensus params, abci responses here
db *CSStateDB
evpool ttypes.EvidencePool
}
// NewBlockExecutor returns a new BlockExecutor with a NopEventBus.
// Call SetEventBus to provide one.
func NewBlockExecutor(db *CSStateDB, evpool ttypes.EvidencePool) *BlockExecutor {
return &BlockExecutor{
db: db,
evpool: evpool,
}
}
// ValidateBlock validates the given block against the given state.
// If the block is invalid, it returns an error.
// Validation does not mutate state, but does require historical information from the stateDB,
// ie. to verify evidence from a validator at an old height.
func (blockExec *BlockExecutor) ValidateBlock(s State, block *ttypes.TendermintBlock) error {
return validateBlock(blockExec.db, s, block)
}
// ApplyBlock validates the block against the state, executes it against the app,
// fires the relevant events, commits the app, and saves the new state and responses.
// It's the only function that needs to be called
// from outside this package to process and commit an entire block.
// It takes a blockID to avoid recomputing the parts hash.
func (blockExec *BlockExecutor) ApplyBlock(s State, blockID ttypes.BlockID, block *ttypes.TendermintBlock) (State, error) {
if err := blockExec.ValidateBlock(s, block); err != nil {
return s, fmt.Errorf("Commit failed for invalid block: %v", err)
}
// update the state with the block and responses
s, err := updateState(s, blockID, block)
if err != nil {
return s, fmt.Errorf("Commit failed for application: %v", err)
}
blockExec.db.SaveState(s)
// Update evpool now that state is saved
// TODO: handle the crash/recover scenario
// ie. (may need to call Update for last block)
blockExec.evpool.Update(block)
return s, nil
}
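// Usage sketch (hypothetical caller, not part of this file): after assembling the
// TendermintBlock for the next height, consensus code validates and commits it with
//	newState, err := blockExec.ApplyBlock(state, blockID, block)
// and aborts the commit if err != nil.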
// updateState returns a new State updated according to the header and responses.
func updateState(s State, blockID ttypes.BlockID, block *ttypes.TendermintBlock) (State, error) {
// copy the valset so we can apply changes from EndBlock
// and update s.LastValidators and s.Validators
prevValSet := s.Validators.Copy()
nextValSet := prevValSet.Copy()
// update the validator set with the latest abciResponses
lastHeightValsChanged := s.LastHeightValidatorsChanged
// Update validator accums and set state variables
nextValSet.IncrementAccum(1)
// update the params with the latest abciResponses
nextParams := s.ConsensusParams
lastHeightParamsChanged := s.LastHeightConsensusParamsChanged
// NOTE: the AppHash has not been populated.
// It will be filled on state.Save.
return State{
ChainID: s.ChainID,
LastBlockHeight: block.Header.Height,
LastBlockTotalTx: s.LastBlockTotalTx + block.Header.NumTxs,
LastBlockID: blockID,
LastBlockTime: block.Header.Time,
Validators: nextValSet,
LastValidators: s.Validators.Copy(),
LastHeightValidatorsChanged: lastHeightValsChanged,
ConsensusParams: nextParams,
LastHeightConsensusParamsChanged: lastHeightParamsChanged,
LastResultsHash: nil,
AppHash: nil,
}, nil
}
func updateValidators(currentSet *ttypes.ValidatorSet, updates []*tmtypes.ValNode) error {
// If more or equal than 1/3 of total voting power changed in one block, then
// a light client could never prove the transition externally. See
// ./lite/doc.go for details on how a light client tracks validators.
vp23, err := changeInVotingPowerMoreOrEqualToOneThird(currentSet, updates)
if err != nil {
return err
}
if vp23 {
return errors.New("the change in voting power must be strictly less than 1/3")
}
for _, v := range updates {
pubkey, err := ttypes.ConsensusCrypto.PubKeyFromBytes(v.PubKey) // NOTE: expects go-wire encoded pubkey
if err != nil {
return err
}
address := ttypes.GenAddressByPubKey(pubkey)
power := v.Power
// mind the overflow from int64
if power < 0 {
return fmt.Errorf("Power (%d) overflows int64", v.Power)
}
_, val := currentSet.GetByAddress(address)
if val == nil {
// add val
added := currentSet.Add(ttypes.NewValidator(pubkey, power))
if !added {
return fmt.Errorf("Failed to add new validator %X with voting power %d", address, power)
}
} else if v.Power == 0 {
// remove val
_, removed := currentSet.Remove(address)
if !removed {
return fmt.Errorf("Failed to remove validator %X", address)
}
} else {
// update val
val.VotingPower = power
updated := currentSet.Update(val)
if !updated {
return fmt.Errorf("Failed to update validator %X with voting power %d", address, power)
}
}
}
return nil
}
func changeInVotingPowerMoreOrEqualToOneThird(currentSet *ttypes.ValidatorSet, updates []*tmtypes.ValNode) (bool, error) {
threshold := currentSet.TotalVotingPower() * 1 / 3
acc := int64(0)
for _, v := range updates {
pubkey, err := ttypes.ConsensusCrypto.PubKeyFromBytes(v.PubKey) // NOTE: expects go-wire encoded pubkey
if err != nil {
return false, err
}
address := ttypes.GenAddressByPubKey(pubkey)
power := v.Power
// mind the overflow from int64
if power < 0 {
return false, fmt.Errorf("Power (%d) overflows int64", v.Power)
}
_, val := currentSet.GetByAddress(address)
if val == nil {
acc += power
} else {
np := val.VotingPower - power
if np < 0 {
np = -np
}
acc += np
}
if acc >= threshold {
return true, nil
}
}
return false, nil
}
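// Worked example (illustrative numbers only): with three validators of 10 voting power
// each, TotalVotingPower() is 30 and the threshold is 10. A single update that adds a
// brand-new validator with power 10, or shifts an existing validator's power by 10 or
// more, already pushes acc to the threshold, so the function returns true and
// updateValidators rejects the whole update batch.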
func validateBlock(stateDB *CSStateDB, s State, b *ttypes.TendermintBlock) error {
newTxs := b.Header.NumTxs
// validate basic info
if b.Header.ChainID != s.ChainID {
return fmt.Errorf("Wrong Block.Header.ChainID. Expected %v, got %v", s.ChainID, b.Header.ChainID)
}
if b.Header.Height != s.LastBlockHeight+1 {
return fmt.Errorf("Wrong Block.Header.Height. Expected %v, got %v", s.LastBlockHeight+1, b.Header.Height)
}
// validate prev block info
if !bytes.Equal(b.Header.LastBlockID.Hash, s.LastBlockID.Hash) {
return fmt.Errorf("Wrong Block.Header.LastBlockID. Expected %v, got %v", s.LastBlockID, b.Header.LastBlockID)
}
if b.Header.TotalTxs != s.LastBlockTotalTx+newTxs {
return fmt.Errorf("Wrong Block.Header.TotalTxs. Expected %v, got %v", s.LastBlockTotalTx+newTxs, b.Header.TotalTxs)
}
// validate app info
if !bytes.Equal(b.Header.AppHash, s.AppHash) {
return fmt.Errorf("Wrong Block.Header.AppHash. Expected %X, got %v", s.AppHash, b.Header.AppHash)
}
if !bytes.Equal(b.Header.ConsensusHash, s.ConsensusParams.Hash()) {
return fmt.Errorf("Wrong Block.Header.ConsensusHash. Expected %X, got %v", s.ConsensusParams.Hash(), b.Header.ConsensusHash)
}
if !bytes.Equal(b.Header.LastResultsHash, s.LastResultsHash) {
return fmt.Errorf("Wrong Block.Header.LastResultsHash. Expected %X, got %v", s.LastResultsHash, b.Header.LastResultsHash)
}
if !bytes.Equal(b.Header.ValidatorsHash, s.Validators.Hash()) {
return fmt.Errorf("Wrong Block.Header.ValidatorsHash. Expected %X, got %v", s.Validators.Hash(), b.Header.ValidatorsHash)
}
// Validate block LastCommit.
if b.Header.Height == 1 {
if len(b.LastCommit.Precommits) != 0 {
return errors.New("Block at height 1 (first block) should have no LastCommit precommits")
}
} else {
if len(b.LastCommit.Precommits) != s.LastValidators.Size() {
return fmt.Errorf("Invalid block commit size. Expected %v, got %v",
s.LastValidators.Size(), len(b.LastCommit.Precommits))
}
lastCommit := &ttypes.Commit{TendermintCommit: b.LastCommit}
err := s.LastValidators.VerifyCommit(
s.ChainID, s.LastBlockID, b.Header.Height-1, lastCommit)
if err != nil {
return err
}
}
for _, ev := range b.Evidence.Evidence {
evidence := ttypes.EvidenceEnvelope2Evidence(ev)
if evidence != nil {
if err := VerifyEvidence(stateDB, s, evidence); err != nil {
return ttypes.NewEvidenceInvalidErr(evidence, err)
}
}
}
return nil
}
{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"test-chain-Ep9EcD","validators":[{"pub_key":{"type":"ed25519","data":"220ACBE680DF2473A0CB48987A00FCC1812F106A7390BE6B8E2D31122C992A19"},"power":10,"name":""}],"app_hash":""}
This diff is collapsed.
This diff is collapsed.
{"address":"02A13174B92727C4902DB099E51A3339F48BD45E","pub_key":{"type":"ed25519","data":"220ACBE680DF2473A0CB48987A00FCC1812F106A7390BE6B8E2D31122C992A19"},"last_height":0,"last_round":0,"last_step":0,"priv_key":{"type":"ed25519","data":"B3DC4C0725884EBB7264B92F1D8D37584A64ADE1799D997EC64B4FE3973E08DE220ACBE680DF2473A0CB48987A00FCC1812F106A7390BE6B8E2D31122C992A19"}}
\ No newline at end of file
// Uses nacl's secret_box to encrypt a net.Conn.
// It is (meant to be) an implementation of the STS protocol.
// Note we do not (yet) assume that a remote peer's pubkey
// is known ahead of time, and thus we are technically
// still vulnerable to MITM. (TODO!)
// See docs/sts-final.pdf for more info
package tendermint
import (
"bytes"
crand "crypto/rand"
"crypto/sha256"
"encoding/binary"
"errors"
"io"
"net"
"time"
"gitlab.33.cn/chain33/chain33/common/crypto"
"gitlab.33.cn/chain33/chain33/plugin/consensus/tendermint/types"
"golang.org/x/crypto/nacl/box"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/ripemd160"
)
// 2 + 1024 == 1026 total frame size
const (
dataLenSize = 2 // uint16 to describe the length, is <= dataMaxSize
dataMaxSize = 1024
totalFrameSize = dataMaxSize + dataLenSize
sealedFrameSize = totalFrameSize + secretbox.Overhead
authSigMsgSize = (32) + (64)
) // fixed size (length prefixed) byte arrays
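// Illustrative framing (derived from the constants above): a 100-byte chunk becomes
// [2-byte big-endian length = 100 | 100 bytes of data | 924 zero bytes], a 1026-byte
// frame, which secretbox.Seal grows by its 16-byte overhead to a 1042-byte sealed
// frame on the wire.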
// Implements net.Conn
type SecretConnection struct {
conn io.ReadWriteCloser
recvBuffer []byte
recvNonce *[24]byte
sendNonce *[24]byte
remPubKey crypto.PubKey
shrSecret *[32]byte // shared secret
}
// Performs handshake and returns a new authenticated SecretConnection.
// Returns nil if error in handshake.
// Caller should call conn.Close()
// See docs/sts-final.pdf for more information.
func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (*SecretConnection, error) {
locPubKey := locPrivKey.PubKey()
// Generate ephemeral keys for perfect forward secrecy.
locEphPub, locEphPriv := genEphKeys()
// Write local ephemeral pubkey and receive one too.
// NOTE: every 32-byte string is accepted as a Curve25519 public key
// (see DJB's Curve25519 paper: http://cr.yp.to/ecdh/curve25519-20060209.pdf)
remEphPub, err := shareEphPubKey(conn, locEphPub)
if err != nil {
return nil, err
}
// Compute common shared secret.
shrSecret := computeSharedSecret(remEphPub, locEphPriv)
// Sort by lexical order.
loEphPub, hiEphPub := sort32(locEphPub, remEphPub)
// Check if the local ephemeral public key
// was the least, lexicographically sorted.
locIsLeast := bytes.Equal(locEphPub[:], loEphPub[:])
// Generate nonces to use for secretbox.
recvNonce, sendNonce := genNonces(loEphPub, hiEphPub, locIsLeast)
// Generate common challenge to sign.
challenge := genChallenge(loEphPub, hiEphPub)
// Construct SecretConnection.
sc := &SecretConnection{
conn: conn,
recvBuffer: nil,
recvNonce: recvNonce,
sendNonce: sendNonce,
shrSecret: shrSecret,
}
// Sign the challenge bytes for authentication.
locSignature := signChallenge(challenge, locPrivKey)
// Share (in secret) each other's pubkey & challenge signature
authSigMsg, err := shareAuthSignature(sc, locPubKey, locSignature)
if err != nil {
return nil, err
}
remPubKey, remSignature := authSigMsg.Key, authSigMsg.Sig
if !remPubKey.VerifyBytes(challenge[:], remSignature) {
return nil, errors.New("Challenge verification failed")
}
// We've authorized.
sc.remPubKey = remPubKey
return sc, nil
}
// Returns authenticated remote pubkey
func (sc *SecretConnection) RemotePubKey() crypto.PubKey {
return sc.remPubKey
}
// Writes encrypted frames of `sealedFrameSize`
// CONTRACT: data smaller than dataMaxSize is read atomically.
func (sc *SecretConnection) Write(data []byte) (n int, err error) {
for 0 < len(data) {
var frame = make([]byte, totalFrameSize)
var chunk []byte
if dataMaxSize < len(data) {
chunk = data[:dataMaxSize]
data = data[dataMaxSize:]
} else {
chunk = data
data = nil
}
chunkLength := len(chunk)
binary.BigEndian.PutUint16(frame, uint16(chunkLength))
copy(frame[dataLenSize:], chunk)
// encrypt the frame
var sealedFrame = make([]byte, sealedFrameSize)
secretbox.Seal(sealedFrame[:0], frame, sc.sendNonce, sc.shrSecret)
// fmt.Printf("secretbox.Seal(sealed:%X,sendNonce:%X,shrSecret:%X\n", sealedFrame, sc.sendNonce, sc.shrSecret)
incr2Nonce(sc.sendNonce)
// end encryption
if _, err := sc.conn.Write(sealedFrame); err != nil {
return n, err
}
n += len(chunk)
}
return
}
// CONTRACT: data smaller than dataMaxSize is read atomically.
func (sc *SecretConnection) Read(data []byte) (n int, err error) {
if 0 < len(sc.recvBuffer) {
n = copy(data, sc.recvBuffer)
sc.recvBuffer = sc.recvBuffer[n:]
return
}
sealedFrame := make([]byte, sealedFrameSize)
_, err = io.ReadFull(sc.conn, sealedFrame)
if err != nil {
return
}
// decrypt the frame
var frame = make([]byte, totalFrameSize)
// fmt.Printf("secretbox.Open(sealed:%X,recvNonce:%X,shrSecret:%X\n", sealedFrame, sc.recvNonce, sc.shrSecret)
_, ok := secretbox.Open(frame[:0], sealedFrame, sc.recvNonce, sc.shrSecret)
if !ok {
return n, errors.New("Failed to decrypt SecretConnection")
}
incr2Nonce(sc.recvNonce)
// end decryption
var chunkLength = binary.BigEndian.Uint16(frame) // read the first two bytes
if chunkLength > dataMaxSize {
return 0, errors.New("chunkLength is greater than dataMaxSize")
}
var chunk = frame[dataLenSize : dataLenSize+chunkLength]
n = copy(data, chunk)
sc.recvBuffer = chunk[n:]
return
}
// Implements net.Conn
func (sc *SecretConnection) Close() error { return sc.conn.Close() }
func (sc *SecretConnection) LocalAddr() net.Addr { return sc.conn.(net.Conn).LocalAddr() }
func (sc *SecretConnection) RemoteAddr() net.Addr { return sc.conn.(net.Conn).RemoteAddr() }
func (sc *SecretConnection) SetDeadline(t time.Time) error { return sc.conn.(net.Conn).SetDeadline(t) }
func (sc *SecretConnection) SetReadDeadline(t time.Time) error {
return sc.conn.(net.Conn).SetReadDeadline(t)
}
func (sc *SecretConnection) SetWriteDeadline(t time.Time) error {
return sc.conn.(net.Conn).SetWriteDeadline(t)
}
func genEphKeys() (ephPub, ephPriv *[32]byte) {
var err error
ephPub, ephPriv, err = box.GenerateKey(crand.Reader)
if err != nil {
types.PanicCrisis("Could not generate ephemeral keypairs")
}
return
}
func shareEphPubKey(conn io.ReadWriter, locEphPub *[32]byte) (remEphPub *[32]byte, err error) {
var err1, err2 error
Parallel(
func() {
_, err1 = conn.Write(locEphPub[:])
},
func() {
remEphPub = new([32]byte)
_, err2 = io.ReadFull(conn, remEphPub[:])
},
)
if err1 != nil {
return nil, err1
}
if err2 != nil {
return nil, err2
}
return remEphPub, nil
}
func computeSharedSecret(remPubKey, locPrivKey *[32]byte) (shrSecret *[32]byte) {
shrSecret = new([32]byte)
box.Precompute(shrSecret, remPubKey, locPrivKey)
return
}
func sort32(foo, bar *[32]byte) (lo, hi *[32]byte) {
if bytes.Compare(foo[:], bar[:]) < 0 {
lo = foo
hi = bar
} else {
lo = bar
hi = foo
}
return
}
func genNonces(loPubKey, hiPubKey *[32]byte, locIsLo bool) (recvNonce, sendNonce *[24]byte) {
nonce1 := hash24(append(loPubKey[:], hiPubKey[:]...))
nonce2 := new([24]byte)
copy(nonce2[:], nonce1[:])
nonce2[len(nonce2)-1] ^= 0x01
if locIsLo {
recvNonce = nonce1
sendNonce = nonce2
} else {
recvNonce = nonce2
sendNonce = nonce1
}
return
}
func genChallenge(loPubKey, hiPubKey *[32]byte) (challenge *[32]byte) {
return hash32(append(loPubKey[:], hiPubKey[:]...))
}
func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKey) (signature crypto.Signature) {
signature = locPrivKey.Sign(challenge[:])
return
}
type authSigMessage struct {
Key crypto.PubKey
Sig crypto.Signature
}
func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature crypto.Signature) (*authSigMessage, error) {
var recvMsg authSigMessage
var err1, err2 error
Parallel(
func() {
msgByte := make([]byte, len(pubKey.Bytes())+len(signature.Bytes()))
copy(msgByte, pubKey.Bytes())
copy(msgByte[len(pubKey.Bytes()):], signature.Bytes())
_, err1 = sc.Write(msgByte)
},
func() {
readBuffer := make([]byte, authSigMsgSize)
_, err2 = io.ReadFull(sc, readBuffer)
if err2 != nil {
return
}
//n := int(0) // not used.
//recvMsg = wire.ReadBinary(authSigMessage{}, bytes.NewBuffer(readBuffer), authSigMsgSize, &n, &err2).(authSigMessage)
//secret.Info("shareAuthSignature", "readBuffer", readBuffer)
recvMsg.Key, err2 = types.ConsensusCrypto.PubKeyFromBytes(readBuffer[:32])
if err2 != nil {
return
}
recvMsg.Sig, err2 = types.ConsensusCrypto.SignatureFromBytes(readBuffer[32:])
if err2 != nil {
return
}
})
if err1 != nil {
return nil, err1
}
if err2 != nil {
return nil, err2
}
return &recvMsg, nil
}
//--------------------------------------------------------------------------------
// sha256
func hash32(input []byte) (res *[32]byte) {
hasher := sha256.New()
hasher.Write(input) // nolint: errcheck, gas
resSlice := hasher.Sum(nil)
res = new([32]byte)
copy(res[:], resSlice)
return
}
// We only fill in the first 20 bytes with ripemd160
func hash24(input []byte) (res *[24]byte) {
hasher := ripemd160.New()
hasher.Write(input) // nolint: errcheck, gas
resSlice := hasher.Sum(nil)
res = new([24]byte)
copy(res[:], resSlice)
return
}
// increment nonce big-endian by 2 with wraparound.
func incr2Nonce(nonce *[24]byte) {
incrNonce(nonce)
incrNonce(nonce)
}
// increment nonce big-endian by 1 with wraparound.
func incrNonce(nonce *[24]byte) {
for i := 23; 0 <= i; i-- {
nonce[i] += 1
if nonce[i] != 0 {
return
}
}
}
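// Wraparound example: a nonce ending in {..., 0xFE, 0xFF} becomes {..., 0xFF, 0x00}
// after incrNonce, and an all-0xFF nonce wraps to all zeros. incr2Nonce therefore
// always advances the 24-byte big-endian counter by exactly 2, so the send and
// receive nonces produced by genNonces stay on disjoint sequences.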
This diff is collapsed.
This diff is collapsed.
package tendermint
import (
"context"
"encoding/binary"
"errors"
"flag"
"fmt"
"math/rand"
"os"
"testing"
"time"
"gitlab.33.cn/chain33/chain33/blockchain"
"gitlab.33.cn/chain33/chain33/common/address"
"gitlab.33.cn/chain33/chain33/common/limits"
"gitlab.33.cn/chain33/chain33/common/log"
"gitlab.33.cn/chain33/chain33/executor"
"gitlab.33.cn/chain33/chain33/mempool"
"gitlab.33.cn/chain33/chain33/p2p"
pty "gitlab.33.cn/chain33/chain33/plugin/dapp/norm/types"
"gitlab.33.cn/chain33/chain33/queue"
"gitlab.33.cn/chain33/chain33/rpc"
"gitlab.33.cn/chain33/chain33/store"
"gitlab.33.cn/chain33/chain33/types"
"google.golang.org/grpc"
_ "gitlab.33.cn/chain33/chain33/plugin/dapp/init"
_ "gitlab.33.cn/chain33/chain33/plugin/store/init"
_ "gitlab.33.cn/chain33/chain33/system"
)
var (
random *rand.Rand
loopCount int = 10
conn *grpc.ClientConn
c types.Chain33Client
)
func init() {
err := limits.SetLimits()
if err != nil {
panic(err)
}
random = rand.New(rand.NewSource(types.Now().UnixNano()))
log.SetLogLevel("info")
}
func TestTendermintPerf(t *testing.T) {
TendermintPerf()
fmt.Println("=======start clearing test data!=======")
clearTestData()
}
func TendermintPerf() {
q, chain, s, mem, exec, cs, p2p := initEnvTendermint()
defer chain.Close()
defer mem.Close()
defer exec.Close()
defer s.Close()
defer q.Close()
defer cs.Close()
defer p2p.Close()
err := createConn()
for err != nil {
err = createConn()
}
time.Sleep(10 * time.Second)
for i := 0; i < loopCount; i++ {
NormPut()
time.Sleep(time.Second)
}
time.Sleep(10 * time.Second)
}
func initEnvTendermint() (queue.Queue, *blockchain.BlockChain, queue.Module, *mempool.Mempool, queue.Module, queue.Module, queue.Module) {
var q = queue.New("channel")
flag.Parse()
cfg, sub := types.InitCfg("chain33.test.toml")
types.Init(cfg.Title, cfg)
chain := blockchain.New(cfg.BlockChain)
chain.SetQueueClient(q.Client())
exec := executor.New(cfg.Exec, sub.Exec)
exec.SetQueueClient(q.Client())
types.SetMinFee(0)
s := store.New(cfg.Store, sub.Store)
s.SetQueueClient(q.Client())
cs := New(cfg.Consensus, sub.Consensus["tendermint"])
cs.SetQueueClient(q.Client())
mem := mempool.New(cfg.MemPool)
mem.SetQueueClient(q.Client())
network := p2p.New(cfg.P2P)
network.SetQueueClient(q.Client())
rpc.InitCfg(cfg.Rpc)
gapi := rpc.NewGRpcServer(q.Client(), nil)
go gapi.Listen()
return q, chain, s, mem, exec, cs, network
}
func createConn() error {
var err error
url := "127.0.0.1:8802"
fmt.Println("grpc url:", url)
conn, err = grpc.Dial(url, grpc.WithInsecure())
if err != nil {
fmt.Fprintln(os.Stderr, err)
return err
}
c = types.NewChain33Client(conn)
r = rand.New(rand.NewSource(types.Now().UnixNano()))
return nil
}
func generateKey(i, valI int) string {
key := make([]byte, valI)
binary.PutUvarint(key[:10], uint64(valI))
binary.PutUvarint(key[12:24], uint64(i))
if _, err := rand.Read(key[24:]); err != nil {
os.Exit(1)
}
return string(key)
}
func generateValue(i, valI int) string {
value := make([]byte, valI)
binary.PutUvarint(value[:16], uint64(i))
binary.PutUvarint(value[32:128], uint64(i))
if _, err := rand.Read(value[128:]); err != nil {
os.Exit(1)
}
return string(value)
}
func prepareTxList() *types.Transaction {
var key string
var value string
var i int
key = generateKey(i, 32)
value = generateValue(i, 180)
nput := &pty.NormAction_Nput{&pty.NormPut{Key: key, Value: []byte(value)}}
action := &pty.NormAction{Value: nput, Ty: pty.NormActionPut}
tx := &types.Transaction{Execer: []byte("norm"), Payload: types.Encode(action), Fee: fee}
tx.To = address.ExecAddress("norm")
tx.Nonce = random.Int63()
tx.Sign(types.SECP256K1, getprivkey("CC38546E9E659D15E6B4893F0AB32A06D103931A8230B0BDE71459D2B27D6944"))
return tx
}
func clearTestData() {
err := os.RemoveAll("datadir")
if err != nil {
fmt.Println("delete datadir have a err:", err.Error())
}
fmt.Println("test data clear sucessfully!")
}
func NormPut() {
tx := prepareTxList()
reply, err := c.SendTransaction(context.Background(), tx)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
if !reply.IsOk {
fmt.Fprintln(os.Stderr, errors.New(string(reply.GetMsg())))
return
}
}
package tendermint
import (
"time"
)
var (
tickTockBufferSize = 10
)
// TimeoutTicker is a timer that schedules timeouts
// conditional on the height/round/step in the timeoutInfo.
// The timeoutInfo.Duration may be non-positive.
type TimeoutTicker interface {
Start()
Stop()
Chan() <-chan timeoutInfo // on which to receive a timeout
ScheduleTimeout(ti timeoutInfo) // reset the timer
}
// timeoutTicker wraps time.Timer,
// scheduling timeouts only for greater height/round/step
// than what it's already seen.
// Timeouts are scheduled along the tickChan,
// and fired on the tockChan.
type timeoutTicker struct {
timer *time.Timer
tickChan chan timeoutInfo // for scheduling timeouts
tockChan chan timeoutInfo // for notifying about them
}
// NewTimeoutTicker returns a new TimeoutTicker.
func NewTimeoutTicker() TimeoutTicker {
tt := &timeoutTicker{
timer: time.NewTimer(0),
tickChan: make(chan timeoutInfo, tickTockBufferSize),
tockChan: make(chan timeoutInfo, tickTockBufferSize),
}
tt.stopTimer() // don't want to fire until the first scheduled timeout
return tt
}
// OnStart implements cmn.Service. It starts the timeout routine.
func (t *timeoutTicker) Start() {
go t.timeoutRoutine()
}
// OnStop implements cmn.Service. It stops the timeout routine.
func (t *timeoutTicker) Stop() {
t.stopTimer()
}
// Chan returns a channel on which timeouts are sent.
func (t *timeoutTicker) Chan() <-chan timeoutInfo {
return t.tockChan
}
// ScheduleTimeout schedules a new timeout by sending on the internal tickChan.
// The timeoutRoutine is always available to read from tickChan, so this won't block.
// The scheduling may fail if the timeoutRoutine has already scheduled a timeout for a later height/round/step.
func (t *timeoutTicker) ScheduleTimeout(ti timeoutInfo) {
t.tickChan <- ti
}
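// Usage sketch (assumed caller, not part of this file):
//	ticker := NewTimeoutTicker()
//	ticker.Start()
//	ticker.ScheduleTimeout(timeoutInfo{Duration: 3 * time.Second, Height: h, Round: r, Step: s})
//	ti := <-ticker.Chan() // fires after 3s unless a later height/round/step is scheduled first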
//-------------------------------------------------------------
// stop the timer and drain if necessary
func (t *timeoutTicker) stopTimer() {
// Stop() returns false if it was already fired or was stopped
if !t.timer.Stop() {
select {
case <-t.timer.C:
default:
tendermintlog.Debug("Timer already stopped")
}
}
}
// send on tickChan to start a new timer.
// timers are interupted and replaced by new ticks from later steps
// timeouts of 0 on the tickChan will be immediately relayed to the tockChan
func (t *timeoutTicker) timeoutRoutine() {
tendermintlog.Debug("Starting timeout routine")
var ti timeoutInfo
for {
select {
case newti := <-t.tickChan:
tendermintlog.Debug("Received tick", "old_ti", ti, "new_ti", newti)
// ignore tickers for old height/round/step
if newti.Height < ti.Height {
continue
} else if newti.Height == ti.Height {
if newti.Round < ti.Round {
continue
} else if newti.Round == ti.Round {
if ti.Step > 0 && newti.Step <= ti.Step {
continue
}
}
}
// stop the last timer
t.stopTimer()
// update timeoutInfo and reset timer
// NOTE time.Timer allows duration to be non-positive
ti = newti
t.timer.Reset(ti.Duration)
tendermintlog.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
case <-t.timer.C:
tendermintlog.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
// go routine here guarantees timeoutRoutine doesn't block.
// Determinism comes from playback in the receiveRoutine.
// We can eliminate it by merging the timeoutRoutine into receiveRoutine
// and managing the timeouts ourselves with a millisecond ticker
go func(toi timeoutInfo) { t.tockChan <- toi }(ti)
}
}
}
#!/bin/bash
# shellcheck disable=SC1078
# shellcheck disable=SC1079
# shellcheck disable=SC1117
# shellcheck disable=SC2002
# shellcheck disable=SC2003
# shellcheck disable=SC2086
# shellcheck disable=SC2091
# shellcheck disable=SC2116
# shellcheck disable=SC2129
# shellcheck disable=SC2140
# shellcheck disable=SC2162
# shellcheck disable=SC2181
package="chain33_tendermint_config.tar.gz"
log_file=".auto_deploy.log"
config_file="auto_deploy.config"
serverStr="servers"
InitLog() {
if [ -f ${log_file} ]; then
rm ${log_file}
fi
touch ${log_file}
}
Log() {
if [ ! -e ${log_file} ]; then
touch ${log_file}
fi
# get current time
local curtime
curtime=$(date "+%Y-%m-%d %H:%M:%S")
echo "[$curtime] $* ..." >>$log_file
}
GetInputFile() {
echo 'Please input the files: (such as "chain33 chain33-cli genesis.json" ...) '
read file
# todo: file detection
Log "The input file is ${file}"
}
PackageFiles() {
Log "Begin to package the files: ${file}"
tar zcf ${package} $file
}
GetUserNamePasswdAndPath() {
echo "Which way to get environment? 1) Input 2) Config file"
read choice
if [ ${choice} -eq 1 ]; then
echo 'Please input the username, password and path of the destination: (such as "ubuntu 123456 /home/ubuntu/chain33")'
read destInfo
username=$(echo ${destInfo} | awk -F ' ' '{print $1}')
password=$(echo ${destInfo} | awk -F ' ' '{print $2}')
remote_dir=$(echo ${destInfo} | awk -F ' ' '{print $3}')
echo 'Please input ip list of your destination: (such as "192.168.3.143 192.168.3.144 192.168.3.145 192.168.3.146")'
read iplist
index=0
CreateNewConfigFile
for ip in $(echo ${iplist}); do
index=$(expr $index + 1)
echo "[servers.${index}]" >>${config_file}
echo "userName:${username}" >>${config_file}
echo "password:${password}" >>${config_file}
echo "hostIp:${ip}" >>${config_file}
echo "path:${remote_dir}" >>${config_file}
done
Log "The dest ip is ${ip} and path is ${remote_dir}"
elif [ ${choice} -eq 2 ]; then
ShowConfigInfo
echo "Does the config of destination right?(yes/no)"
read input
if [ "X${input}" = "Xno" ]; then
echo "The config file is wrong. You can config it manually."
return 1
fi
else
echo "Wrong input..."
return 2
fi
ShowConfigInfo
}
SendFileAndDecompressFile() {
getSections
for line in $sections; do
if [[ $line =~ $serverStr ]]; then
index=$(echo $line | awk -F '.' '{print $2}' | awk -F ']' '{print$1}')
getInfoByIndexAndKey $index "userName"
username=${info}
getInfoByIndexAndKey $index "password"
password=${info}
getInfoByIndexAndKey $index "hostIp"
ip=${info}
getInfoByIndexAndKey $index "path"
remote_dir=${info}
ExpectCmd "scp ${package} ${username}@${ip}:${remote_dir}"
if [ $? -ne 0 ]; then
Log "Send file failed, this tool will stoped..."
return 1
fi
ExpectCmd "ssh ${username}@${ip} tar zxf ${remote_dir}/${package} -C ${remote_dir}"
if [ $? -ne 0 ]; then
Log "Decompress file failed, this tool will stoped..."
return 2
fi
fi
done
}
ExpectCmd() {
cmd=$*
expect -c "
spawn ${cmd}
expect {
"yes" { send "yes\\r"; exp_continue }
"password" { send "$password\\r" }
}
expect eof"
}
CreateNewConfigFile() {
if [ -f ${config_file} ]; then
rm ${config_file}
fi
touch ${config_file}
}
ShowConfigInfo() {
if [ ! -f ${config_file} ]; then
Log "Config file is not existed."
return 1
fi
getSections
for line in $sections; do
if [[ $line =~ $serverStr ]]; then
index=$(echo $line | awk -F '.' '{print $2}' | awk -F ']' '{print$1}')
getInfoByIndexAndKey $index "userName"
echo "servers.$index: userName->$info"
getInfoByIndexAndKey $index "password"
echo "servers.$index: password->$info"
getInfoByIndexAndKey $index "hostIp"
echo "servers.$index: hostIp->$info"
getInfoByIndexAndKey $index "path"
echo "servers.$index: path->$info"
fi
done
}
getSections() {
sections=$(sed -n '/^[# ]*\[.*\][ ]*/p' ${config_file})
}
getInfoByIndex() {
index=$1
nextIndex=$(expr ${index} + 1)
info=$(cat ${config_file} | sed -n "/^[# ]*\[servers.${index}/,/^[# ]*\[servers.${nextIndex}/p")
}
getInfoByIndexAndKey() {
index=$1
nextIndex=$(expr ${index} + 1)
key=$2
info=$(cat ${config_file} | sed -n "/^[# ]*\[servers.${index}/,/^[# ]*\[servers.${nextIndex}/p" | grep -i $key | awk -F ':' '{print $2}')
}
help() {
echo "***************************************************************************************************"
echo "*"
echo "* This tool can send file to specified path."
echo "* And you should input the file first(It doesn't support get file auto-matically now)"
echo "* Then it will pack those file into a package and send to the environment."
echo "*"
echo "* Note: You should move the file to the current directory, otherwise the packing process will be failed."
echo "*"
echo "***************************************************************************************************"
}
main() {
# Help for this tool
help
# Init log file
InitLog
# Input the files to be transferred
GetInputFile
# Package the files
PackageFiles
if [ $? -ne 0 ]; then
Log "Pachage file err, this tool will be stoped..."
exit
fi
# Input the IP and path of the destination
GetUserNamePasswdAndPath
if [ $? -ne 0 ]; then
Log "GetUserNamePasswdAndPath err, this tool will be stoped..."
exit
fi
# Send and decompress the package
SendFileAndDecompressFile
ret=$?
if [ ${ret} -eq 1 ]; then
echo "Send file err and exit soon..."
exit
elif [ ${ret} -eq 2 ]; then
echo "Decompress file err and exit soon..."
fi
}
main
package main
import (
"bytes"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"os"
"strconv"
"time"
"gitlab.33.cn/chain33/chain33/common"
"gitlab.33.cn/chain33/chain33/common/address"
"gitlab.33.cn/chain33/chain33/common/crypto"
ty "gitlab.33.cn/chain33/chain33/plugin/dapp/valnode/types"
rpctypes "gitlab.33.cn/chain33/chain33/rpc/types"
"gitlab.33.cn/chain33/chain33/types"
)
const fee = 1e6
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
var r *rand.Rand
var TxHeightOffset int64 = 0
func main() {
if len(os.Args) < 3 || os.Args[1] == "-h" {
LoadHelp()
return
}
fmt.Println("jrpc url:", os.Args[2]+":8801")
r = rand.New(rand.NewSource(time.Now().UnixNano()))
argsWithoutProg := os.Args[1:]
switch argsWithoutProg[0] {
case "-h": //使用帮助
LoadHelp()
case "perf":
if len(argsWithoutProg) != 6 {
fmt.Print(errors.New("wrong parameter count").Error())
return
}
Perf(argsWithoutProg[1], argsWithoutProg[2], argsWithoutProg[3], argsWithoutProg[4], argsWithoutProg[5])
case "put":
if len(argsWithoutProg) != 3 {
fmt.Print(errors.New("wrong parameter count").Error())
return
}
Put(argsWithoutProg[1], argsWithoutProg[2], "")
case "get":
if len(argsWithoutProg) != 3 {
fmt.Print(errors.New("wrong parameter count").Error())
return
}
Get(argsWithoutProg[1], argsWithoutProg[2])
case "valnode":
if len(argsWithoutProg) != 4 {
fmt.Print(errors.New("wrong parameter count").Error())
return
}
ValNode(argsWithoutProg[1], argsWithoutProg[2], argsWithoutProg[3])
}
}
func LoadHelp() {
fmt.Println("Available Commands:")
fmt.Println("perf [ip, size, num, interval, duration] {offset} : 写数据性能测试")
fmt.Println("put [ip, size] : 写数据")
fmt.Println("get [ip, hash] : 读数据")
fmt.Println("valnode [ip, pubkey, power] : 增加/删除/修改tendermint节点")
}
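// Example invocations (the binary name "./tool" is hypothetical; the code
// above always talks to the node's JSON-RPC endpoint at <ip>:8801):
//   ./tool perf 127.0.0.1 32 100 1 3600   // ~100 txs of 32 random bytes per 1s interval for 3600s
//   ./tool put 127.0.0.1 32               // send one 32-byte user.write transaction
//   ./tool get 127.0.0.1 <txhash>         // query a transaction by hash
//   ./tool valnode 127.0.0.1 <hex pubkey> 10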
func Perf(ip, size, num, interval, duration string) {
var numThread int
numInt, err := strconv.Atoi(num)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
intervalInt, err := strconv.Atoi(interval)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
durInt, err := strconv.Atoi(duration)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
if numInt < 10 {
numThread = 1
} else if numInt > 100 {
numThread = 10
} else {
numThread = numInt / 10
}
maxTxPerAcc := 50
ch := make(chan struct{}, numThread)
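// Each of the numThread workers below sends numInt/numThread transactions per
// interval, refreshing TxHeightOffset via setTxHeight before each batch and
// switching to a freshly generated key once maxTxPerAcc sends have been made.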
for i := 0; i < numThread; i++ {
go func() {
txCount := 0
_, priv := genaddress()
for sec := 0; durInt == 0 || sec < durInt; {
setTxHeight(ip)
for txs := 0; txs < numInt/numThread; txs++ {
if txCount >= maxTxPerAcc {
_, priv = genaddress()
txCount = 0
}
Put(ip, size, common.ToHex(priv.Bytes()))
txCount++
}
time.Sleep(time.Second * time.Duration(intervalInt))
sec += intervalInt
}
ch <- struct{}{}
}()
}
for j := 0; j < numThread; j++ {
<-ch
}
}
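// Put builds a user.write transaction carrying `size` random bytes, signs it
// with the given hex private key (or a freshly generated one when privkey is
// empty) and submits it through the Chain33.SendTransaction JSON-RPC call.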
func Put(ip string, size string, privkey string) {
sizeInt, err := strconv.Atoi(size)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
url := "http://" + ip + ":8801"
if privkey == "" {
_, priv := genaddress()
privkey = common.ToHex(priv.Bytes())
}
payload := RandStringBytes(sizeInt)
//fmt.Println("payload:", common.ToHex([]byte(payload)))
tx := &types.Transaction{Execer: []byte("user.write"), Payload: []byte(payload), Fee: 1e6}
tx.To = address.ExecAddress("user.write")
tx.Expire = TxHeightOffset + types.TxHeightFlag
tx.Sign(types.SECP256K1, getprivkey(privkey))
poststr := fmt.Sprintf(`{"jsonrpc":"2.0","id":2,"method":"Chain33.SendTransaction","params":[{"data":"%v"}]}`,
common.ToHex(types.Encode(tx)))
resp, err := http.Post(url, "application/json", bytes.NewBufferString(poststr))
if err != nil {
fmt.Println(err)
return
}
defer resp.Body.Close()
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Println(err)
return
}
fmt.Printf("returned JSON: %s\n", string(b))
}
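// Get queries a transaction by hash via Chain33.QueryTransaction and prints
// the raw JSON response.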
func Get(ip string, hash string) {
url := "http://" + ip + ":8801"
fmt.Println("transaction hash:", hash)
poststr := fmt.Sprintf(`{"jsonrpc":"2.0","id":2,"method":"Chain33.QueryTransaction","params":[{"hash":"%s"}]}`, hash)
resp, err := http.Post(url, "application/json", bytes.NewBufferString(poststr))
if err != nil {
fmt.Println(err)
return
}
defer resp.Body.Close()
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Println(err)
return
}
fmt.Printf("returned JSON: %s\n", string(b))
}
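// setTxHeight fetches the latest block header via Chain33.GetLastHeader and
// records its height in TxHeightOffset, which Put uses as the TxHeight expire base.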
func setTxHeight(ip string) {
url := "http://" + ip + ":8801"
poststr := fmt.Sprintf(`{"jsonrpc":"2.0","id":2,"method":"Chain33.GetLastHeader","params":[]}`)
resp, err := http.Post(url, "application/json", bytes.NewBufferString(poststr))
if err != nil {
fmt.Println(err)
return
}
defer resp.Body.Close()
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Println(err)
return
}
//fmt.Printf("returned JSON: %s\n", string(b))
msg := &RespMsg{}
err = json.Unmarshal(b, msg)
if err != nil {
fmt.Println(err)
return
}
TxHeightOffset = msg.Result.Height
fmt.Println("TxHeightOffset:", TxHeightOffset)
}
type RespMsg struct {
Id int64 `json:"id"`
Result rpctypes.Header `json:"result"`
Err string `json:"error"`
}
func getprivkey(key string) crypto.PrivKey {
cr, err := crypto.New(types.GetSignName("", types.SECP256K1))
if err != nil {
panic(err)
}
bkey, err := common.FromHex(key)
if err != nil {
panic(err)
}
priv, err := cr.PrivKeyFromBytes(bkey)
if err != nil {
panic(err)
}
return priv
}
func genaddress() (string, crypto.PrivKey) {
cr, err := crypto.New(types.GetSignName("", types.SECP256K1))
if err != nil {
panic(err)
}
privto, err := cr.GenKey()
if err != nil {
panic(err)
}
addrto := address.PubKeyToAddress(privto.PubKey().Bytes())
fmt.Println("addr:", addrto.String())
return addrto.String(), privto
}
func RandStringBytes(n int) string {
b := make([]byte, n)
rand.Seed(time.Now().UnixNano())
for i := range b {
b[i] = letterBytes[rand.Intn(len(letterBytes))]
}
return string(b)
}
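// ValNode builds a valnode update transaction that sets the given tendermint
// validator pubkey (hex encoded) to the given voting power and submits it
// through Chain33.SendTransaction.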
func ValNode(ip, pubkey, power string) {
url := "http://" + ip + ":8801"
fmt.Println(pubkey, ":", power)
pubkeybyte, err := hex.DecodeString(pubkey)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
powerInt, err := strconv.Atoi(power)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
_, priv := genaddress()
privkey := common.ToHex(priv.Bytes())
nput := &ty.ValNodeAction_Node{Node: &ty.ValNode{PubKey: pubkeybyte, Power: int64(powerInt)}}
action := &ty.ValNodeAction{Value: nput, Ty: ty.ValNodeActionUpdate}
tx := &types.Transaction{Execer: []byte("valnode"), Payload: types.Encode(action), Fee: fee}
tx.To = address.ExecAddress("valnode")
tx.Nonce = r.Int63()
tx.Sign(types.SECP256K1, getprivkey(privkey))
poststr := fmt.Sprintf(`{"jsonrpc":"2.0","id":2,"method":"Chain33.SendTransaction","params":[{"data":"%v"}]}`,
common.ToHex(types.Encode(tx)))
resp, err := http.Post(url, "application/json", bytes.NewBufferString(poststr))
if err != nil {
fmt.Println(err)
return
}
defer resp.Body.Close()
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Println(err)
return
}
fmt.Printf("returned JSON: %s\n", string(b))
}
This diff is collapsed.
package types
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"github.com/gogo/protobuf/proto"
"gitlab.33.cn/chain33/chain33/common/crypto"
"gitlab.33.cn/chain33/chain33/common/merkle"
tmtypes "gitlab.33.cn/chain33/chain33/plugin/dapp/valnode/types"
)
// ErrEvidenceInvalid wraps a piece of evidence and the error denoting how or why it is invalid.
type ErrEvidenceInvalid struct {
Evidence Evidence
ErrorValue error
}
func NewEvidenceInvalidErr(ev Evidence, err error) *ErrEvidenceInvalid {
return &ErrEvidenceInvalid{ev, err}
}
// Error returns a string representation of the error.
func (err *ErrEvidenceInvalid) Error() string {
return Fmt("Invalid evidence: %v. Evidence: %v", err.ErrorValue, err.Evidence)
}
//-------------------------------------------
const (
DuplicateVote = "DuplicateVote"
MockGood = "MockGood"
MockBad = "MockBad"
)
var EvidenceType2Type map[string]reflect.Type
var EvidenceType2Obj map[string]Evidence
// Evidence represents any provable malicious activity by a validator
type Evidence interface {
Height() int64 // height of the equivocation
Address() []byte // address of the equivocating validator
Index() int // index of the validator in the validator set
Hash() []byte // hash of the evidence
Verify(chainID string) error // verify the evidence
Equal(Evidence) bool // check equality of evidence
String() string
Copy() Evidence
TypeName() string
SetChild(child proto.Message)
Child() proto.Message
}
//-------------------------------------------
// EvidenceList is a list of Evidence. Evidences is not a word.
type EvidenceList []Evidence
// Hash returns the simple merkle root hash of the EvidenceList.
func (evl EvidenceList) Hash() []byte {
// Recursive impl.
// Copied from tmlibs/merkle to avoid allocations
switch len(evl) {
case 0:
return nil
case 1:
return evl[0].Hash()
default:
left := evl[:(len(evl)+1)/2].Hash()
right := evl[(len(evl)+1)/2:].Hash()
return merkle.GetHashFromTwoHash(left, right)
}
}
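// Worked example: for a three-element list {e0, e1, e2} the split index is
// (3+1)/2 = 2, so
//   left  = EvidenceList{e0, e1}.Hash() = merkle.GetHashFromTwoHash(e0.Hash(), e1.Hash())
//   right = EvidenceList{e2}.Hash()     = e2.Hash()
// and the result is merkle.GetHashFromTwoHash(left, right).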
func (evl EvidenceList) String() string {
s := ""
for _, e := range evl {
s += Fmt("%s\t\t", e)
}
return s
}
// Has returns true if the evidence is in the EvidenceList.
func (evl EvidenceList) Has(evidence Evidence) bool {
for _, ev := range evl {
if ev.Equal(evidence) {
return true
}
}
return false
}
//-------------------------------------------
// DuplicateVoteEvidence contains evidence a validator signed two conflicting votes.
type DuplicateVoteEvidence struct {
*tmtypes.DuplicateVoteEvidence
}
// String returns a string representation of the evidence.
func (dve *DuplicateVoteEvidence) String() string {
return Fmt("VoteA: %v; VoteB: %v", dve.VoteA, dve.VoteB)
}
// Height returns the height this evidence refers to.
func (dve *DuplicateVoteEvidence) Height() int64 {
return dve.VoteA.Height
}
// Address returns the address of the validator.
func (dve *DuplicateVoteEvidence) Address() []byte {
pubkey, err := PubKeyFromString(dve.PubKey)
if err != nil {
return nil
}
return GenAddressByPubKey(pubkey)
}
// Index returns the index of the validator.
func (dve *DuplicateVoteEvidence) Index() int {
return int(dve.VoteA.ValidatorIndex)
}
// Hash returns the hash of the evidence.
func (dve *DuplicateVoteEvidence) Hash() []byte {
return SimpleHashFromBinary(dve)
}
// Verify returns an error if the two votes aren't conflicting.
// To be conflicting, they must be from the same validator, for the same H/R/S, but for different blocks.
func (dve *DuplicateVoteEvidence) Verify(chainID string) error {
// H/R/S must be the same
if dve.VoteA.Height != dve.VoteB.Height ||
dve.VoteA.Round != dve.VoteB.Round ||
dve.VoteA.Type != dve.VoteB.Type {
return fmt.Errorf("DuplicateVoteEvidence Error: H/R/S does not match. Got %v and %v", dve.VoteA, dve.VoteB)
}
// Address must be the same
if !bytes.Equal(dve.VoteA.ValidatorAddress, dve.VoteB.ValidatorAddress) {
return fmt.Errorf("DuplicateVoteEvidence Error: Validator addresses do not match. Got %X and %X", dve.VoteA.ValidatorAddress, dve.VoteB.ValidatorAddress)
}
// XXX: Should we enforce index is the same ?
if dve.VoteA.ValidatorIndex != dve.VoteB.ValidatorIndex {
return fmt.Errorf("DuplicateVoteEvidence Error: Validator indices do not match. Got %d and %d", dve.VoteA.ValidatorIndex, dve.VoteB.ValidatorIndex)
}
blockIDA := BlockID{
*dve.VoteA.BlockID,
}
blockIDB := BlockID{
*dve.VoteB.BlockID,
}
// BlockIDs must be different
if blockIDA.Equals(blockIDB) {
return fmt.Errorf("DuplicateVoteEvidence Error: BlockIDs are the same (%v) - not a real duplicate vote!", dve.VoteA.BlockID)
}
// Signatures must be valid
pubkey, err := PubKeyFromString(dve.PubKey)
if err != nil {
return fmt.Errorf("DuplicateVoteEvidence Error: pubkey[%v] to PubKey failed:%v", dve.PubKey, err)
}
sigA, err := ConsensusCrypto.SignatureFromBytes(dve.VoteA.Signature)
if err != nil {
return fmt.Errorf("DuplicateVoteEvidence Error: SIGA[%v] to signature failed:%v", dve.VoteA.Signature, err)
}
sigB, err := ConsensusCrypto.SignatureFromBytes(dve.VoteB.Signature)
if err != nil {
return fmt.Errorf("DuplicateVoteEvidence Error: SIGB[%v] to signature failed:%v", dve.VoteB.Signature, err)
}
vote := &Vote{
dve.VoteA,
}
if !pubkey.VerifyBytes(SignBytes(chainID, vote), sigA) {
return fmt.Errorf("DuplicateVoteEvidence Error verifying VoteA: %v", ErrVoteInvalidSignature)
}
vote = &Vote{
dve.VoteB,
}
if !pubkey.VerifyBytes(SignBytes(chainID, vote), sigB) {
return fmt.Errorf("DuplicateVoteEvidence Error verifying VoteB: %v", ErrVoteInvalidSignature)
}
return nil
}
// Equal checks if two pieces of evidence are equal.
func (dve *DuplicateVoteEvidence) Equal(ev Evidence) bool {
if _, ok := ev.(*DuplicateVoteEvidence); !ok {
return false
}
if dve == nil {
return false
}
// just check their hashes
return bytes.Equal(SimpleHashFromBinary(dve), SimpleHashFromBinary(ev.(*DuplicateVoteEvidence)))
}
func (dve *DuplicateVoteEvidence) TypeName() string {
return DuplicateVote
}
func (dve *DuplicateVoteEvidence) Copy() Evidence {
return &DuplicateVoteEvidence{}
}
func (dve *DuplicateVoteEvidence) SetChild(child proto.Message) {
dve.DuplicateVoteEvidence = child.(*tmtypes.DuplicateVoteEvidence)
}
func (dve *DuplicateVoteEvidence) Child() proto.Message {
return dve.DuplicateVoteEvidence
}
func SimpleHashFromBinary(item *DuplicateVoteEvidence) []byte {
b, e := json.Marshal(item)
if e != nil {
//commonlog.Error("SimpleHashFromBinary marshal failed", "type", item, "error", e)
panic(Fmt("SimpleHashFromBinary marshal failed, err:%v", e))
}
return crypto.Ripemd160(b)
}
func EvidenceEnvelope2Evidence(envelope *tmtypes.EvidenceEnvelope) Evidence {
if v, ok := EvidenceType2Type[envelope.TypeName]; ok {
realMsg2 := reflect.New(v).Interface()
err := proto.Unmarshal(envelope.Data, realMsg2.(proto.Message))
if err != nil {
panic(Fmt("Evidence is not valid", "evidenceType", envelope.TypeName, "err", err))
}
if evidence, ok2 := EvidenceType2Obj[envelope.TypeName]; ok2 {
evidence = evidence.Copy()
evidence.SetChild(realMsg2.(proto.Message))
return evidence
}
}
return nil
}
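// The EvidenceType2Type / EvidenceType2Obj maps used above are expected to be
// populated elsewhere in the package. A minimal registration sketch (an
// assumption, not part of the original file) would look roughly like:
//
//   func init() {
//       EvidenceType2Type = map[string]reflect.Type{
//           DuplicateVote: reflect.TypeOf(tmtypes.DuplicateVoteEvidence{}),
//       }
//       EvidenceType2Obj = map[string]Evidence{
//           DuplicateVote: &DuplicateVoteEvidence{},
//       }
//   }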
//-----------------------------------------------------------------
// UNSTABLE
type MockGoodEvidence struct {
Height_ int64
Address_ []byte
Index_ int
}
// UNSTABLE
func NewMockGoodEvidence(height int64, index int, address []byte) MockGoodEvidence {
return MockGoodEvidence{height, address, index}
}
func (e MockGoodEvidence) Height() int64 { return e.Height_ }
func (e MockGoodEvidence) Address() []byte { return e.Address_ }
func (e MockGoodEvidence) Index() int { return e.Index_ }
func (e MockGoodEvidence) Hash() []byte {
return []byte(Fmt("%d-%d", e.Height_, e.Index_))
}
func (e MockGoodEvidence) Verify(chainID string) error { return nil }
func (e MockGoodEvidence) Equal(ev Evidence) bool {
e2 := ev.(MockGoodEvidence)
return e.Height_ == e2.Height_ &&
bytes.Equal(e.Address_, e2.Address_) &&
e.Index_ == e2.Index_
}
func (e MockGoodEvidence) String() string {
return Fmt("GoodEvidence: %d/%s/%d", e.Height_, e.Address_, e.Index_)
}
func (e MockGoodEvidence) TypeName() string {
return MockGood
}
func (e MockGoodEvidence) Copy() Evidence {
return &MockGoodEvidence{}
}
func (e MockGoodEvidence) SetChild(proto.Message) {}
func (e MockGoodEvidence) Child() proto.Message {
return nil
}
// UNSTABLE
type MockBadEvidence struct {
MockGoodEvidence
}
func (e MockBadEvidence) Verify(chainID string) error { return fmt.Errorf("MockBadEvidence") }
func (e MockBadEvidence) Equal(ev Evidence) bool {
e2 := ev.(MockBadEvidence)
return e.Height_ == e2.Height_ &&
bytes.Equal(e.Address_, e2.Address_) &&
e.Index_ == e2.Index_
}
func (e MockBadEvidence) String() string {
return Fmt("BadEvidence: %d/%s/%d", e.Height_, e.Address_, e.Index_)
}
func (e MockBadEvidence) TypeName() string {
return MockBad
}
func (e MockBadEvidence) Copy() Evidence {
return &MockBadEvidence{}
}
func (e MockBadEvidence) SetChild(proto.Message) {}
func (e MockBadEvidence) Child() proto.Message {
return nil
}
//------------------------------------------------------
// evidence pool
// EvidencePool defines the EvidencePool interface used by the ConsensusState.
// UNSTABLE
type EvidencePool interface {
PendingEvidence() []Evidence
AddEvidence(Evidence) error
Update(*TendermintBlock)
}
// MockEvidencePool is an empty implementation of an EvidencePool, useful for testing.
// UNSTABLE
type MockEvidencePool struct {
}
func (m MockEvidencePool) PendingEvidence() []Evidence { return nil }
func (m MockEvidencePool) AddEvidence(Evidence) error { return nil }
func (m MockEvidencePool) Update(*TendermintBlock) {}
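// Usage sketch for the mocks above (assuming package-internal test code):
//   pool := MockEvidencePool{}
//   ev := NewMockGoodEvidence(10, 0, []byte("addr"))
//   _ = pool.AddEvidence(ev)   // the mock always returns nil
//   _ = pool.PendingEvidence() // the mock always returns nil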