Commit 8ed23987 authored by 张振华

commit dpos

parent 8bf90244
Title="local"
[log]
# Log level; supported values: debug(dbug)/info/warn/error(eror)/crit
loglevel = "debug"
logConsoleLevel = "info"
# Log file name; may include a directory. All generated log files are placed in this directory.
logFile = "logs/chain33.log"
# Maximum size of a single log file (unit: MB)
maxFileSize = 300
# Maximum number of rotated log files to keep
maxBackups = 100
# Maximum age of rotated log files (unit: days)
maxAge = 28
# Whether log file names use local time (otherwise UTC is used)
localTime = true
# Whether rotated log files are compressed (gz format)
compress = true
# Whether to print the calling source file and line number
callerFile = false
# Whether to print the calling function
callerFunction = false
[blockchain]
defCacheSize=512
maxFetchBlockNum=128
timeoutSeconds=5
batchBlockNum=128
driver="leveldb"
dbPath="datadir"
dbCache=64
isStrongConsistency=true
singleMode=true
batchsync=false
enableTxQuickIndex=true
[p2p]
seeds=["127.0.0.1:13802"]
enable=true
isSeed=true
serverStart=true
innerSeedEnable=false
useGithub=false
innerBounds=300
msgCacheSize=10240
driver="leveldb"
dbPath="datadir/addrbook"
dbCache=4
grpcLogFile="grpc33.log"
version=199
verMix=199
verMax=199
[rpc]
jrpcBindAddr="localhost:8801"
grpcBindAddr="localhost:8802"
whitelist=["127.0.0.1"]
jrpcFuncWhitelist=["*"]
grpcFuncWhitelist=["*"]
[mempool]
name="timeline"
poolCacheSize=10240
minTxFee=100000
[consensus]
name="tendermint"
minerstart=false
[mver.consensus]
fundKeyAddr = "1BQXS6TxaYYG5mADaWij4AxhZZUTpw95a5"
coinReward = 18
coinDevFund = 12
ticketPrice = 10000
powLimitBits = "0x1f00ffff"
retargetAdjustmentFactor = 4
futureBlockTime = 16
ticketFrozenTime = 5 #5s only for test
ticketWithdrawTime = 10 #10s only for test
ticketMinerWaitTime = 2 #2s only for test
maxTxNumber = 1600 #160
targetTimespan = 2304
targetTimePerBlock = 16
[mver.consensus.ForkChainParamV1]
maxTxNumber = 10000
targetTimespan = 288 #only for test
targetTimePerBlock = 2
[mver.consensus.ForkChainParamV2]
powLimitBits = "0x1f2fffff"
[consensus.sub.dpos]
genesis="14KEKbYtKKQm4wMthSK9J4La4nAiidGozt"
genesisBlockTime=1514533394
timeoutCheckConnections=1000
timeoutVoting=3000
timeoutWaitNotify=2000
createEmptyBlocks=false
createEmptyBlocksInterval=0
validatorNodes=["127.0.0.1:46656"]
blockInterval=3
continueBlockNum=12
isValidator=false
[store]
name="kvdb"
driver="leveldb"
dbPath="datadir/mavltree"
dbCache=128
[store.sub.kvdb]
enableMavlPrefix=false
enableMVCC=false
[wallet]
minFee=100000
driver="leveldb"
dbPath="wallet"
dbCache=16
signType="secp256k1"
[wallet.sub.ticket]
minerdisable=false
minerwhitelist=["*"]
[exec]
isFree=false
minExecFee=100000
enableStat=false
enableMVCC=false
alias=["token1:token","token2:token","token3:token"]
saveTokenTxList=false
[exec.sub.cert]
# Whether to enable certificate verification and signing
enable=false
# Path to the crypto files
cryptoPath="authdir/crypto"
# Certificate signature type; supported: "auth_ecdsa", "auth_sm2"
signType="auth_ecdsa"
{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"test-chain-Ep9EcD","validators":[{"pub_key":{"type":"ed25519","data":"220ACBE680DF2473A0CB48987A00FCC1812F106A7390BE6B8E2D31122C992A19"},"name":""}],"app_hash":""}
{"address":"02A13174B92727C4902DB099E51A3339F48BD45E","pub_key":{"type":"ed25519","data":"220ACBE680DF2473A0CB48987A00FCC1812F106A7390BE6B8E2D31122C992A19"},"priv_key":{"type":"ed25519","data":"B3DC4C0725884EBB7264B92F1D8D37584A64ADE1799D997EC64B4FE3973E08DE220ACBE680DF2473A0CB48987A00FCC1812F106A7390BE6B8E2D31122C992A19"}}
all:
	sh ./create_protobuf.sh
#!/bin/sh
protoc --go_out=plugins=grpc:../types ./*.proto --proto_path=. --proto_path="../types/"
syntax = "proto3";
package types;
message VoteItem {
int32 votedNodeIndex = 1; //index of the node being voted for
bytes votedNodeAddress = 2; //address of the node being voted for
int64 cycleStart = 3; //start time of the cycle
int64 cycleStop = 4; //end time of the cycle
int64 periodStart = 5; //start time of the new node's block-producing period
int64 periodStop = 6; //end time of the new node's block-producing period
int64 height = 7; //start height of the new node's block production
bytes voteId = 8; //vote ID
}
//DPosVote is a vote cast by a node in the DPoS consensus, used to reach consensus.
message DPosVote {
VoteItem voteItem = 1;
int64 voteTimestamp = 2; //time the vote was cast
int32 voterNodeIndex = 3; //index of the voting node
bytes voterNodeAddress = 4; //address of the voting node
bytes signature = 5; //the voter's signature
}
message DPosVoteReply {
DPosVote vote = 1;
}
//DPosNotify is sent when a delegate node's block-producing period ends, notifying the other nodes to confirm the height and to vote for the next node.
message DPosNotify {
VoteItem vote = 1;
int64 heightStop = 2; //end height of the new node's block production
bytes hashStop = 3; //hash of the last block produced by the new node
int64 notifyTimestamp = 4; //time the notify was sent
int32 notifyNodeIndex = 5; //index of the notifying node
bytes notifyNodeAddress= 6; //address of the notifying node
bytes signature = 7; //signature of the notifying node
}
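As a quick orientation for the message definitions above, here is a hedged sketch (not part of this commit) of how the Go structs generated by protoc-gen-go for VoteItem/DPosVote might be populated; the field names follow the standard proto-to-Go mapping already used elsewhere in this commit, while all concrete values are hypothetical.
// Illustrative sketch only. Assumes the generated package
// "github.com/33cn/plugin/plugin/consensus/dpos/types" is imported as types.
func exampleBuildVote(now int64) *types.DPosVote {
	return &types.DPosVote{
		VoteItem: &types.VoteItem{
			VotedNodeIndex:   1, // index of the node being voted for
			VotedNodeAddress: []byte("voted-node-addr"),
			CycleStart:       now, // e.g. a 54s cycle: 3 nodes * 3s * 6 blocks
			CycleStop:        now + 54,
			PeriodStart:      now + 18, // this node's 18s block-producing slot
			PeriodStop:       now + 36,
			Height:           100,
			VoteId:           []byte("vote-id"),
		},
		VoteTimestamp:    now,
		VoterNodeIndex:   0,
		VoterNodeAddress: []byte("voter-node-addr"),
	}
}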
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package dpos uses nacl's secret_box to encrypt a net.Conn.
// It is (meant to be) an implementation of the STS protocol.
// Note we do not (yet) assume that a remote peer's pubkey
// is known ahead of time, and thus we are technically
// still vulnerable to MITM. (TODO!)
// See docs/sts-final.pdf for more info
package dpos
import (
"bytes"
crand "crypto/rand"
"crypto/sha256"
"encoding/binary"
"errors"
"io"
"net"
"time"
"github.com/33cn/chain33/common/crypto"
"github.com/33cn/plugin/plugin/consensus/dpos/types"
"golang.org/x/crypto/nacl/box"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/ripemd160"
)
// 2 + 1024 == 1026 total frame size
const (
dataLenSize = 2 // uint16 to describe the length, is <= dataMaxSize
dataMaxSize = 1024
totalFrameSize = dataMaxSize + dataLenSize
sealedFrameSize = totalFrameSize + secretbox.Overhead
authSigMsgSize = (32) + (64)
) // fixed size (length prefixed) byte arrays
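// Worked example (illustrative): a 1500-byte Write is split into chunks of 1024
// and 476 bytes. Each chunk goes into a 1026-byte frame (2-byte big-endian length
// prefix + up to 1024 bytes of payload, zero-padded), and sealing adds
// secretbox.Overhead (16 bytes), so each sealed frame on the wire is 1042 bytes.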
// SecretConnection Implements net.Conn
type SecretConnection struct {
conn io.ReadWriteCloser
recvBuffer []byte
recvNonce *[24]byte
sendNonce *[24]byte
remPubKey crypto.PubKey
shrSecret *[32]byte // shared secret
}
// MakeSecretConnection performs a handshake and returns a new authenticated SecretConnection.
// Returns nil with an error if the handshake fails.
// The caller is responsible for calling conn.Close().
// See docs/sts-final.pdf for more information.
func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (*SecretConnection, error) {
locPubKey := locPrivKey.PubKey()
// Generate ephemeral keys for perfect forward secrecy.
locEphPub, locEphPriv := genEphKeys()
// Write local ephemeral pubkey and receive one too.
// NOTE: every 32-byte string is accepted as a Curve25519 public key
// (see DJB's Curve25519 paper: http://cr.yp.to/ecdh/curve25519-20060209.pdf)
remEphPub, err := shareEphPubKey(conn, locEphPub)
if err != nil {
return nil, err
}
// Compute common shared secret.
shrSecret := computeSharedSecret(remEphPub, locEphPriv)
// Sort by lexical order.
loEphPub, hiEphPub := sort32(locEphPub, remEphPub)
// Check if the local ephemeral public key
// was the least, lexicographically sorted.
locIsLeast := bytes.Equal(locEphPub[:], loEphPub[:])
// Generate nonces to use for secretbox.
recvNonce, sendNonce := genNonces(loEphPub, hiEphPub, locIsLeast)
// Generate common challenge to sign.
challenge := genChallenge(loEphPub, hiEphPub)
// Construct SecretConnection.
sc := &SecretConnection{
conn: conn,
recvBuffer: nil,
recvNonce: recvNonce,
sendNonce: sendNonce,
shrSecret: shrSecret,
}
// Sign the challenge bytes for authentication.
locSignature := signChallenge(challenge, locPrivKey)
// Share (in secret) each other's pubkey & challenge signature
authSigMsg, err := shareAuthSignature(sc, locPubKey, locSignature)
if err != nil {
return nil, err
}
remPubKey, remSignature := authSigMsg.Key, authSigMsg.Sig
if !remPubKey.VerifyBytes(challenge[:], remSignature) {
return nil, errors.New("Challenge verification failed")
}
// We've authorized.
sc.remPubKey = remPubKey
return sc, nil
}
// RemotePubKey Returns authenticated remote pubkey
func (sc *SecretConnection) RemotePubKey() crypto.PubKey {
return sc.remPubKey
}
// Writes encrypted frames of `sealedFrameSize`
// CONTRACT: data smaller than dataMaxSize is written atomically.
func (sc *SecretConnection) Write(data []byte) (n int, err error) {
for 0 < len(data) {
var frame = make([]byte, totalFrameSize)
var chunk []byte
if dataMaxSize < len(data) {
chunk = data[:dataMaxSize]
data = data[dataMaxSize:]
} else {
chunk = data
data = nil
}
chunkLength := len(chunk)
binary.BigEndian.PutUint16(frame, uint16(chunkLength))
copy(frame[dataLenSize:], chunk)
// encrypt the frame
var sealedFrame = make([]byte, sealedFrameSize)
secretbox.Seal(sealedFrame[:0], frame, sc.sendNonce, sc.shrSecret)
// fmt.Printf("secretbox.Seal(sealed:%X,sendNonce:%X,shrSecret:%X\n", sealedFrame, sc.sendNonce, sc.shrSecret)
incr2Nonce(sc.sendNonce)
// end encryption
_, err := sc.conn.Write(sealedFrame)
if err != nil {
return n, err
}
n += len(chunk)
}
return
}
// CONTRACT: data smaller than dataMaxSize is read atomically.
func (sc *SecretConnection) Read(data []byte) (n int, err error) {
if 0 < len(sc.recvBuffer) {
n = copy(data, sc.recvBuffer)
sc.recvBuffer = sc.recvBuffer[n:]
return
}
sealedFrame := make([]byte, sealedFrameSize)
_, err = io.ReadFull(sc.conn, sealedFrame)
if err != nil {
return
}
// decrypt the frame
var frame = make([]byte, totalFrameSize)
// fmt.Printf("secretbox.Open(sealed:%X,recvNonce:%X,shrSecret:%X\n", sealedFrame, sc.recvNonce, sc.shrSecret)
_, ok := secretbox.Open(frame[:0], sealedFrame, sc.recvNonce, sc.shrSecret)
if !ok {
return n, errors.New("Failed to decrypt SecretConnection")
}
incr2Nonce(sc.recvNonce)
// end decryption
var chunkLength = binary.BigEndian.Uint16(frame) // read the first two bytes
if chunkLength > dataMaxSize {
return 0, errors.New("chunkLength is greater than dataMaxSize")
}
var chunk = frame[dataLenSize : dataLenSize+chunkLength]
n = copy(data, chunk)
sc.recvBuffer = chunk[n:]
return
}
// Close Implements net.Conn
func (sc *SecretConnection) Close() error { return sc.conn.Close() }
// LocalAddr ...
func (sc *SecretConnection) LocalAddr() net.Addr { return sc.conn.(net.Conn).LocalAddr() }
// RemoteAddr ...
func (sc *SecretConnection) RemoteAddr() net.Addr { return sc.conn.(net.Conn).RemoteAddr() }
// SetDeadline ...
func (sc *SecretConnection) SetDeadline(t time.Time) error { return sc.conn.(net.Conn).SetDeadline(t) }
// SetReadDeadline ...
func (sc *SecretConnection) SetReadDeadline(t time.Time) error {
return sc.conn.(net.Conn).SetReadDeadline(t)
}
// SetWriteDeadline ...
func (sc *SecretConnection) SetWriteDeadline(t time.Time) error {
return sc.conn.(net.Conn).SetWriteDeadline(t)
}
func genEphKeys() (ephPub, ephPriv *[32]byte) {
var err error
ephPub, ephPriv, err = box.GenerateKey(crand.Reader)
if err != nil {
types.PanicCrisis("Could not generate ephemeral keypairs")
}
return
}
func shareEphPubKey(conn io.ReadWriter, locEphPub *[32]byte) (remEphPub *[32]byte, err error) {
var err1, err2 error
Parallel(
func() {
_, err1 = conn.Write(locEphPub[:])
},
func() {
remEphPub = new([32]byte)
_, err2 = io.ReadFull(conn, remEphPub[:])
},
)
if err1 != nil {
return nil, err1
}
if err2 != nil {
return nil, err2
}
return remEphPub, nil
}
func computeSharedSecret(remPubKey, locPrivKey *[32]byte) (shrSecret *[32]byte) {
shrSecret = new([32]byte)
box.Precompute(shrSecret, remPubKey, locPrivKey)
return
}
func sort32(foo, bar *[32]byte) (lo, hi *[32]byte) {
if bytes.Compare(foo[:], bar[:]) < 0 {
lo = foo
hi = bar
} else {
lo = bar
hi = foo
}
return
}
func genNonces(loPubKey, hiPubKey *[32]byte, locIsLo bool) (recvNonce, sendNonce *[24]byte) {
nonce1 := hash24(append(loPubKey[:], hiPubKey[:]...))
nonce2 := new([24]byte)
copy(nonce2[:], nonce1[:])
nonce2[len(nonce2)-1] ^= 0x01
if locIsLo {
recvNonce = nonce1
sendNonce = nonce2
} else {
recvNonce = nonce2
sendNonce = nonce1
}
return
}
func genChallenge(loPubKey, hiPubKey *[32]byte) (challenge *[32]byte) {
return hash32(append(loPubKey[:], hiPubKey[:]...))
}
func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKey) (signature crypto.Signature) {
signature = locPrivKey.Sign(challenge[:])
return
}
type authSigMessage struct {
Key crypto.PubKey
Sig crypto.Signature
}
func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature crypto.Signature) (*authSigMessage, error) {
var recvMsg authSigMessage
var err1, err2 error
Parallel(
func() {
msgByte := make([]byte, len(pubKey.Bytes())+len(signature.Bytes()))
copy(msgByte, pubKey.Bytes())
copy(msgByte[len(pubKey.Bytes()):], signature.Bytes())
_, err1 = sc.Write(msgByte)
},
func() {
readBuffer := make([]byte, authSigMsgSize)
_, err2 = io.ReadFull(sc, readBuffer)
if err2 != nil {
return
}
//n := int(0) // not used.
//recvMsg = wire.ReadBinary(authSigMessage{}, bytes.NewBuffer(readBuffer), authSigMsgSize, &n, &err2).(authSigMessage)
//secret.Info("shareAuthSignature", "readBuffer", readBuffer)
recvMsg.Key, err2 = types.ConsensusCrypto.PubKeyFromBytes(readBuffer[:32])
if err2 != nil {
return
}
recvMsg.Sig, err2 = types.ConsensusCrypto.SignatureFromBytes(readBuffer[32:])
if err2 != nil {
return
}
})
if err1 != nil {
return nil, err1
}
if err2 != nil {
return nil, err2
}
return &recvMsg, nil
}
//--------------------------------------------------------------------------------
// sha256
func hash32(input []byte) (res *[32]byte) {
hasher := sha256.New()
_, err := hasher.Write(input) // nolint: errcheck, gas
if err != nil {
panic(err)
}
resSlice := hasher.Sum(nil)
res = new([32]byte)
copy(res[:], resSlice)
return
}
// We only fill in the first 20 bytes with ripemd160
func hash24(input []byte) (res *[24]byte) {
hasher := ripemd160.New()
_, err := hasher.Write(input) // nolint: errcheck, gas
if err != nil {
panic(err)
}
resSlice := hasher.Sum(nil)
res = new([24]byte)
copy(res[:], resSlice)
return
}
// increment nonce big-endian by 2 with wraparound.
func incr2Nonce(nonce *[24]byte) {
incrNonce(nonce)
incrNonce(nonce)
}
// increment nonce big-endian by 1 with wraparound.
func incrNonce(nonce *[24]byte) {
for i := 23; 0 <= i; i-- {
nonce[i]++
if nonce[i] != 0 {
return
}
}
}
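A minimal usage sketch (not part of this commit) of the handshake above, written as if it lived in this package so the existing net/crypto imports apply; the address and the way privKey is obtained are assumptions.
// Illustrative sketch only; assumes types.ConsensusCrypto is initialized and
// privKey was generated elsewhere (e.g. via the consensus crypto's key generation).
func dialSecret(addr string, privKey crypto.PrivKey) (*SecretConnection, error) {
	conn, err := net.Dial("tcp", addr) // plain TCP connection
	if err != nil {
		return nil, err
	}
	// Ephemeral key exchange, shared secret, nonces and challenge signing all
	// happen inside MakeSecretConnection.
	sc, err := MakeSecretConnection(conn, privKey)
	if err != nil {
		conn.Close() // nolint: errcheck
		return nil, err
	}
	// From here on, Read/Write are encrypted with secretbox frames.
	if _, err := sc.Write([]byte("ping")); err != nil {
		sc.Close() // nolint: errcheck
		return nil, err
	}
	return sc, nil
}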
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dpos
import (
"fmt"
"github.com/33cn/chain33/types"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc"
"math/rand"
"testing"
"time"
_ "github.com/33cn/chain33/system"
_ "github.com/33cn/plugin/plugin/dapp/init"
_ "github.com/33cn/plugin/plugin/store/init"
)
var (
random *rand.Rand
loopCount = 10
conn *grpc.ClientConn
c types.Chain33Client
)
func init() {
setParams(3, 3, 6)
}
func setParams(delegateNum int64, blockInterval int64, continueBlockNum int64) {
dposDelegateNum = delegateNum //number of delegate nodes; read from config for now, may later be decided by voting results
dposBlockInterval = blockInterval //block interval, currently 3s
dposContinueBlockNum = continueBlockNum //number of consecutive blocks a delegate produces once elected
dposCycle = int64(dposDelegateNum * dposBlockInterval * dposContinueBlockNum)
dposPeriod = int64(dposBlockInterval * dposContinueBlockNum)
}
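// Worked example (illustrative): with setParams(3, 3, 6) as called in init() above,
// dposCycle  = 3 * 3 * 6 = 54s (one full rotation over all delegates) and
// dposPeriod = 3 * 6     = 18s (one delegate's continuous slot), i.e. each delegate
// produces 6 blocks, 3s apart, before the next delegate takes over.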
func printTask(now int64, task *DPosTask) {
fmt.Printf("now:%v|cycleStart:%v|cycleStop:%v|periodStart:%v|periodStop:%v|blockStart:%v|blockStop:%v|nodeId:%v\n",
now,
task.cycleStart,
task.cycleStop,
task.periodStart,
task.periodStop,
task.blockStart,
task.blockStop,
task.nodeId)
}
func assertTask(task *DPosTask, t *testing.T) {
assert.Equal(t, true, task.nodeId >= 0 && task.nodeId < dposDelegateNum)
assert.Equal(t, true, task.cycleStart <= task.periodStart && task.periodStart <= task.blockStart && task.blockStop <= task.periodStop && task.periodStop <= task.cycleStop)
}
func TestDecideTaskByTime(t *testing.T) {
now := time.Now().Unix()
task := DecideTaskByTime(now)
printTask(now, &task)
assertTask(&task, t)
setParams(2, 1, 6)
now = time.Now().Unix()
task = DecideTaskByTime(now)
printTask(now, &task)
assertTask(&task, t)
setParams(21, 1, 12)
now = time.Now().Unix()
task = DecideTaskByTime(now)
printTask(now, &task)
assertTask(&task, t)
setParams(21, 2, 12)
now = time.Now().Unix()
task = DecideTaskByTime(now)
printTask(now, &task)
assertTask(&task, t)
setParams(2, 3, 12)
for i := 0; i < 120; i++ {
now = time.Now().Unix()
task = DecideTaskByTime(now)
printTask(now, &task)
assertTask(&task, t)
time.Sleep(time.Second * 1)
}
}
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dpos
import (
"time"
)
var (
tickTockBufferSize = 10
)
// TimeoutTicker is a timer that schedules timeouts
// conditional on the height/round/step in the timeoutInfo.
// The timeoutInfo.Duration may be non-positive.
type TimeoutTicker interface {
Start()
Stop()
Chan() <-chan timeoutInfo // on which to receive a timeout
ScheduleTimeout(ti timeoutInfo) // reset the timer
}
// timeoutTicker wraps time.Timer,
// scheduling timeouts only for greater height/round/step
// than what it's already seen.
// Timeouts are scheduled along the tickChan,
// and fired on the tockChan.
type timeoutTicker struct {
timer *time.Timer
tickChan chan timeoutInfo // for scheduling timeouts
tockChan chan timeoutInfo // for notifying about them
}
// NewTimeoutTicker returns a new TimeoutTicker.
func NewTimeoutTicker() TimeoutTicker {
tt := &timeoutTicker{
timer: time.NewTimer(0),
tickChan: make(chan timeoutInfo, tickTockBufferSize),
tockChan: make(chan timeoutInfo, tickTockBufferSize),
}
tt.stopTimer() // don't want to fire until the first scheduled timeout
return tt
}
// Start starts the timeout routine.
func (t *timeoutTicker) Start() {
go t.timeoutRoutine()
}
// Stop stops the timeout routine.
func (t *timeoutTicker) Stop() {
t.stopTimer()
}
// Chan returns a channel on which timeouts are sent.
func (t *timeoutTicker) Chan() <-chan timeoutInfo {
return t.tockChan
}
// ScheduleTimeout schedules a new timeout by sending on the internal tickChan.
// The timeoutRoutine is always available to read from tickChan, so this won't block.
// The scheduling may fail if the timeoutRoutine has already scheduled a timeout for a later height/round/step.
func (t *timeoutTicker) ScheduleTimeout(ti timeoutInfo) {
t.tickChan <- ti
}
//-------------------------------------------------------------
// stop the timer and drain if necessary
func (t *timeoutTicker) stopTimer() {
// Stop() returns false if it was already fired or was stopped
if !t.timer.Stop() {
select {
case <-t.timer.C:
default:
dposlog.Debug("Timer already stopped")
}
}
}
// send on tickChan to start a new timer.
// timers are interrupted and replaced by new ticks from later steps
// timeouts of 0 on the tickChan will be immediately relayed to the tockChan
func (t *timeoutTicker) timeoutRoutine() {
dposlog.Debug("Starting timeout routine")
var ti timeoutInfo
for {
select {
case newti := <-t.tickChan:
dposlog.Debug("Received tick", "old_ti", ti, "new_ti", newti)
// stop the last timer
t.stopTimer()
// update timeoutInfo and reset timer
// NOTE time.Timer allows duration to be non-positive
ti = newti
t.timer.Reset(ti.Duration)
dposlog.Debug("Scheduled timeout", "dur", ti.Duration)
case <-t.timer.C:
dposlog.Info("Timed out", "dur", ti.Duration, "state", StateTypeMapping[ti.State])
// go routine here guarantees timeoutRoutine doesn't block.
// Determinism comes from playback in the receiveRoutine.
// We can eliminate it by merging the timeoutRoutine into receiveRoutine
// and managing the timeouts ourselves with a millisecond ticker
go func(toi timeoutInfo) { t.tockChan <- toi }(ti)
}
}
}
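A hedged usage sketch (not part of this commit) of the ticker above, written as if in package dpos; it only relies on the Duration field of timeoutInfo that the code above already uses.
// Illustrative sketch only: schedule a 3-second timeout and wait for it to fire.
func exampleTimeoutTicker() {
	tt := NewTimeoutTicker()
	tt.Start()
	defer tt.Stop()
	// Non-positive durations would be relayed immediately, as noted above.
	tt.ScheduleTimeout(timeoutInfo{Duration: 3 * time.Second})
	ti := <-tt.Chan() // received roughly 3s later
	dposlog.Info("timeout fired", "dur", ti.Duration)
}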
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types
import (
"encoding/json"
"io/ioutil"
"time"
"github.com/pkg/errors"
)
//------------------------------------------------------------
// core types for a genesis definition
// GenesisValidator is an initial validator.
type GenesisValidator struct {
PubKey KeyText `json:"pub_key"`
Name string `json:"name"`
}
// GenesisDoc defines the initial conditions for a tendermint blockchain, in particular its validator set.
type GenesisDoc struct {
GenesisTime time.Time `json:"genesis_time"`
ChainID string `json:"chain_id"`
Validators []GenesisValidator `json:"validators"`
AppHash []byte `json:"app_hash"`
AppOptions interface{} `json:"app_options,omitempty"`
}
// SaveAs is a utility method for saving the GenesisDoc as a JSON file.
func (genDoc *GenesisDoc) SaveAs(file string) error {
genDocBytes, err := json.Marshal(genDoc)
if err != nil {
return err
}
return WriteFile(file, genDocBytes, 0644)
}
// ValidatorHash returns the hash of the validator set contained in the GenesisDoc
func (genDoc *GenesisDoc) ValidatorHash() []byte {
vals := make([]*Validator, len(genDoc.Validators))
for i, v := range genDoc.Validators {
if len(v.PubKey.Data) == 0 {
panic(Fmt("ValidatorHash pubkey of validator[%v] in gendoc is empty", i))
}
pubkey, err := PubKeyFromString(v.PubKey.Data)
if err != nil {
panic(Fmt("ValidatorHash PubKeyFromBytes failed:%v", err))
}
vals[i] = NewValidator(pubkey)
}
vset := NewValidatorSet(vals)
return vset.Hash()
}
// ValidateAndComplete checks that all necessary fields are present
// and fills in defaults for optional fields left empty
func (genDoc *GenesisDoc) ValidateAndComplete() error {
if genDoc.ChainID == "" {
return errors.Errorf("Genesis doc must include non-empty chain_id")
}
if len(genDoc.Validators) == 0 {
return errors.Errorf("The genesis file must have at least one validator")
}
if genDoc.GenesisTime.IsZero() {
genDoc.GenesisTime = time.Now()
}
return nil
}
//------------------------------------------------------------
// Make genesis state from file
// GenesisDocFromJSON unmarshals JSON data into a GenesisDoc.
func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) {
genDoc := GenesisDoc{}
err := json.Unmarshal(jsonBlob, &genDoc)
if err != nil {
return nil, err
}
if err := genDoc.ValidateAndComplete(); err != nil {
return nil, err
}
return &genDoc, err
}
// GenesisDocFromFile reads JSON data from a file and unmarshals it into a GenesisDoc.
func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) {
jsonBlob, err := ioutil.ReadFile(genDocFile)
if err != nil {
return nil, errors.Wrap(err, "Couldn't read GenesisDoc file")
}
genDoc, err := GenesisDocFromJSON(jsonBlob)
if err != nil {
return nil, errors.Wrap(err, Fmt("Error reading GenesisDoc at %v", genDocFile))
}
return genDoc, nil
}
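A short sketch (not part of this commit) of loading a genesis document with the helpers above; the file path is hypothetical, and ValidatorHash additionally requires the package's ConsensusCrypto to be initialized since it parses the validator pubkeys.
// Illustrative sketch only, written as if in this package.
func exampleLoadGenesis() ([]byte, error) {
	// Hypothetical path; the content would match the genesis.json shown earlier
	// in this commit (chain_id "test-chain-Ep9EcD", one ed25519 validator).
	genDoc, err := GenesisDocFromFile("genesis.json")
	if err != nil {
		return nil, err
	}
	// GenesisDocFromJSON has already run ValidateAndComplete, so ChainID and
	// Validators are known to be non-empty here.
	return genDoc.ValidatorHash(), nil
}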
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types
import (
"time"
"reflect"
)
var (
// MsgMap define
MsgMap map[byte]reflect.Type
)
// step and message id define
const (
VoteID = byte(0x06)
VoteReplyID = byte(0x07)
NotifyID = byte(0x08)
PacketTypePing = byte(0xff)
PacketTypePong = byte(0xfe)
)
// InitMessageMap ...
func InitMessageMap() {
MsgMap = map[byte]reflect.Type{
VoteID: reflect.TypeOf(DPosVote{}),
VoteReplyID: reflect.TypeOf(DPosVoteReply{}),
NotifyID: reflect.TypeOf(DPosNotify{}),
}
}
//---------------------Canonical json-----------------------------------
// CanonicalJSONVoteItem ...
type CanonicalJSONVoteItem struct {
VotedNodeIndex int32 `json:"votedNodeIndex,omitempty"`
VotedNodeAddress []byte `json:"votedNodeAddress,omitempty"`
CycleStart int64 `json:"cycleStart,omitempty"`
CycleStop int64 `json:"cycleStop,omitempty"`
PeriodStart int64 `json:"periodStart,omitempty"`
PeriodStop int64 `json:"periodStop,omitempty"`
Height int64 `json:"height,omitempty"`
VoteId []byte `json:"voteId,omitempty"`
}
type CanonicalJSONVote struct {
VoteItem *CanonicalJSONVoteItem `json:"vote,omitempty"`
VoteTimestamp int64 `json:"voteTimestamp,omitempty"`
VoterNodeIndex int32 `json:"voterNodeIndex,omitempty"`
VoterNodeAddress []byte `json:"voterNodeAddress,omitempty"`
}
// CanonicalJSONOnceVote ...
type CanonicalJSONOnceVote struct {
ChainID string `json:"chain_id"`
Vote CanonicalJSONVote `json:"vote"`
}
// CanonicalVote ...
func CanonicalVote(vote *Vote) CanonicalJSONVote {
return CanonicalJSONVote{
VoteItem: &CanonicalJSONVoteItem{
VotedNodeIndex: vote.VoteItem.VotedNodeIndex,
VotedNodeAddress: vote.VoteItem.VotedNodeAddress,
CycleStart: vote.VoteItem.CycleStart,
CycleStop: vote.VoteItem.CycleStop,
PeriodStart: vote.VoteItem.PeriodStart,
PeriodStop: vote.VoteItem.PeriodStop,
Height: vote.VoteItem.Height,
VoteId: vote.VoteItem.VoteId,
},
VoteTimestamp: vote.VoteTimestamp,
VoterNodeIndex: vote.VoterNodeIndex,
VoterNodeAddress: vote.VoterNodeAddress,
}
}
type CanonicalJSONNotify struct {
VoteItem *CanonicalJSONVoteItem `json:"vote,omitempty"`
HeightStop int64 `json:"heightStop,omitempty"`
NotifyTimestamp int64 `json:"notifyTimestamp,omitempty"`
}
// CanonicalJSONOnceNotify ...
type CanonicalJSONOnceNotify struct {
ChainID string `json:"chain_id"`
Notify CanonicalJSONNotify `json:"vote"`
}
// CanonicalNotify ...
func CanonicalNotify(notify *Notify) CanonicalJSONNotify {
return CanonicalJSONNotify{
VoteItem: &CanonicalJSONVoteItem{
VotedNodeIndex: notify.Vote.VotedNodeIndex,
VotedNodeAddress: notify.Vote.VotedNodeAddress,
CycleStart: notify.Vote.CycleStart,
CycleStop: notify.Vote.CycleStop,
PeriodStart: notify.Vote.PeriodStart,
PeriodStop: notify.Vote.PeriodStop,
Height: notify.Vote.Height,
VoteId: notify.Vote.VoteId,
},
HeightStop: notify.HeightStop,
NotifyTimestamp: notify.NotifyTimestamp,
}
}
// CanonicalTime ...
func CanonicalTime(t time.Time) string {
// note that sending time over go-wire resets it to
// local time, we need to force UTC here, so the
// signatures match
return t.UTC().Format(timeFormat)
}
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"github.com/33cn/chain33/common/crypto"
"github.com/33cn/chain33/common/log/log15"
"io"
"time"
)
// error defines
var (
ErrNotifyInvalidValidatorAddress = errors.New("Invalid validator address for notify")
ErrNotifyInvalidValidatorIndex = errors.New("Invalid validator index for notify")
ErrNotifyInvalidSignature = errors.New("Invalid notify signature")
ErrVoteInvalidValidatorIndex = errors.New("Invalid validator index for vote")
ErrVoteInvalidValidatorAddress = errors.New("Invalid validator address for vote")
ErrVoteInvalidSignature = errors.New("Invalid vote signature")
ErrVoteNil = errors.New("Nil vote")
votelog = log15.New("module", "tendermint-vote")
ConsensusCrypto crypto.Crypto
)
// Signable is an interface for all signable things.
// It typically removes signatures before serializing.
type Signable interface {
WriteSignBytes(chainID string, w io.Writer, n *int, err *error)
}
// SignBytes is a convenience method for getting the bytes to sign of a Signable.
func SignBytes(chainID string, o Signable) []byte {
buf, n, err := new(bytes.Buffer), new(int), new(error)
o.WriteSignBytes(chainID, buf, n, err)
if *err != nil {
PanicCrisis(err)
}
return buf.Bytes()
}
// Vote Represents a vote from validators for consensus.
type Vote struct {
*DPosVote
}
// WriteSignBytes ...
func (vote *Vote) WriteSignBytes(chainID string, w io.Writer, n *int, err *error) {
if *err != nil {
return
}
canonical := CanonicalJSONOnceVote{
chainID,
CanonicalVote(vote),
}
byteVote, e := json.Marshal(&canonical)
if e != nil {
*err = e
votelog.Error("vote WriteSignBytes marshal failed", "err", e)
return
}
number, writeErr := w.Write(byteVote)
*n = number
*err = writeErr
}
// Copy ...
func (vote *Vote) Copy() *Vote {
voteCopy := *vote
return &voteCopy
}
func (vote *Vote) String() string {
if vote == nil {
return "nil-Vote"
}
return fmt.Sprintf("Vote{VotedNodeIndex:%v, VotedNodeAddr:%X,Cycle[%v,%v],Period[%v,%v],StartHeight:%v,VoteId:%X,VoteTimeStamp:%v,VoteNodeIndex:%v,VoteNodeAddr:%X,Sig:%X}",
vote.VoteItem.VotedNodeIndex,
Fingerprint(vote.VoteItem.VotedNodeAddress),
vote.VoteItem.CycleStart,
vote.VoteItem.CycleStop,
vote.VoteItem.PeriodStart,
vote.VoteItem.PeriodStop,
vote.VoteItem.Height,
Fingerprint(vote.VoteItem.VoteId),
CanonicalTime(time.Unix(0, vote.VoteTimestamp)),
vote.VoterNodeIndex,
Fingerprint(vote.VoterNodeAddress),
Fingerprint(vote.Signature),
)
}
// Verify ...
func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error {
addr := GenAddressByPubKey(pubKey)
if !bytes.Equal(addr, vote.VoterNodeAddress) {
return ErrVoteInvalidValidatorAddress
}
sig, err := ConsensusCrypto.SignatureFromBytes(vote.Signature)
if err != nil {
votelog.Error("vote Verify failed", "err", err)
return err
}
if !pubKey.VerifyBytes(SignBytes(chainID, vote), sig) {
return ErrVoteInvalidSignature
}
return nil
}
// Hash ...
func (vote *Vote) Hash() []byte {
if vote == nil {
//votelog.Error("vote hash is nil")
return nil
}
bytes, err := json.Marshal(vote)
if err != nil {
votelog.Error("vote hash marshal failed", "err", err)
return nil
}
return crypto.Ripemd160(bytes)
}
// Notify Represents a notify message from the block producer for consensus.
type Notify struct {
*DPosNotify
}
// WriteSignBytes ...
func (notify *Notify) WriteSignBytes(chainID string, w io.Writer, n *int, err *error) {
if *err != nil {
return
}
canonical := CanonicalJSONOnceNotify{
chainID,
CanonicalNotify(notify),
}
byteVote, e := json.Marshal(&canonical)
if e != nil {
*err = e
votelog.Error("vote WriteSignBytes marshal failed", "err", e)
return
}
number, writeErr := w.Write(byteVote)
*n = number
*err = writeErr
}
// Copy ...
func (notify *Notify) Copy() *Notify {
notifyCopy := *notify
return &notifyCopy
}
func (notify *Notify) String() string {
if notify == nil {
return "nil-notify"
}
return fmt.Sprintf("Notify{VotedNodeIndex:%v, VotedNodeAddr:%X,Cycle[%v,%v],Period[%v,%v],StartHeight:%v,VoteId:%X,NotifyTimeStamp:%v,HeightStop:%v,NotifyNodeIndex:%v,NotifyNodeAddr:%X,Sig:%X}",
notify.Vote.VotedNodeIndex,
Fingerprint(notify.Vote.VotedNodeAddress),
notify.Vote.CycleStart,
notify.Vote.CycleStop,
notify.Vote.PeriodStart,
notify.Vote.PeriodStop,
notify.Vote.Height,
Fingerprint(notify.Vote.VoteId),
CanonicalTime(time.Unix(0, notify.NotifyTimestamp)),
notify.HeightStop,
notify.NotifyNodeIndex,
Fingerprint(notify.NotifyNodeAddress),
Fingerprint(notify.Signature),
)
}
// Verify ...
func (notify *Notify) Verify(chainID string, pubKey crypto.PubKey) error {
addr := GenAddressByPubKey(pubKey)
if !bytes.Equal(addr, notify.NotifyNodeAddress) {
return ErrNotifyInvalidValidatorAddress
}
sig, err := ConsensusCrypto.SignatureFromBytes(notify.Signature)
if err != nil {
votelog.Error("Notify Verify failed", "err", err)
return err
}
if !pubKey.VerifyBytes(SignBytes(chainID, notify), sig) {
return ErrNotifyInvalidSignature
}
return nil
}
// Hash ...
func (notify *Notify) Hash() []byte {
if notify == nil {
//votelog.Error("vote hash is nil")
return nil
}
bytes, err := json.Marshal(notify)
if err != nil {
votelog.Error("vote hash marshal failed", "err", err)
return nil
}
return crypto.Ripemd160(bytes)
}
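A hedged sketch (not part of this commit) of how a vote built from the types above might be signed and verified; it assumes ConsensusCrypto is initialized and that v.VoterNodeAddress was set from the signer's pubkey via GenAddressByPubKey.
// Illustrative sketch only, written as if in this package.
func exampleSignAndVerifyVote(chainID string, priv crypto.PrivKey, v *DPosVote) error {
	vote := &Vote{DPosVote: v}
	// SignBytes produces the canonical JSON (CanonicalJSONOnceVote) that is signed.
	vote.Signature = priv.Sign(SignBytes(chainID, vote)).Bytes()
	// Verify re-derives the address from the pubkey and checks the signature.
	return vote.Verify(chainID, priv.PubKey())
}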
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types
import (
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"strconv"
"sync"
"syscall"
"time"
)
const (
// RFC3339Millis ...
RFC3339Millis = "2006-01-02T15:04:05.000Z" // forced milliseconds
timeFormat = RFC3339Millis
)
var (
randgen *rand.Rand
randMux sync.Mutex
// Fmt ...
Fmt = fmt.Sprintf
)
// Init ...
func Init() {
if randgen == nil {
randMux.Lock()
randgen = rand.New(rand.NewSource(time.Now().UnixNano()))
randMux.Unlock()
}
}
// WriteFile ...
func WriteFile(filePath string, contents []byte, mode os.FileMode) error {
return ioutil.WriteFile(filePath, contents, mode)
}
// WriteFileAtomic ...
func WriteFileAtomic(filePath string, newBytes []byte, mode os.FileMode) error {
dir := filepath.Dir(filePath)
f, err := ioutil.TempFile(dir, "")
if err != nil {
return err
}
_, err = f.Write(newBytes)
if err == nil {
err = f.Sync()
}
if closeErr := f.Close(); err == nil {
err = closeErr
}
if permErr := os.Chmod(f.Name(), mode); err == nil {
err = permErr
}
if err == nil {
err = os.Rename(f.Name(), filePath)
}
// any err should result in full cleanup
if err != nil {
if er := os.Remove(f.Name()); er != nil {
fmt.Printf("WriteFileAtomic Remove failed:%v", er)
}
}
return err
}
// Tempfile ...
func Tempfile(prefix string) (*os.File, string) {
file, err := ioutil.TempFile("", prefix)
if err != nil {
panic(err)
}
return file, file.Name()
}
// Fingerprint ...
func Fingerprint(slice []byte) []byte {
fingerprint := make([]byte, 6)
copy(fingerprint, slice)
return fingerprint
}
// Kill ...
func Kill() error {
p, err := os.FindProcess(os.Getpid())
if err != nil {
return err
}
return p.Signal(syscall.SIGTERM)
}
// Exit ...
func Exit(s string) {
fmt.Printf(s + "\n")
os.Exit(1)
}
// Parallel ...
func Parallel(tasks ...func()) {
var wg sync.WaitGroup
wg.Add(len(tasks))
for _, task := range tasks {
go func(task func()) {
task()
wg.Done()
}(task)
}
wg.Wait()
}
// Percent represents a percentage in increments of 1/1000th of a percent.
type Percent uint32
// Float ...
func (p Percent) Float() float64 {
return float64(p) * 1e-3
}
func (p Percent) String() string {
var buf [12]byte
b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10)
n := len(b)
b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10)
b[n] = '.'
return string(append(b, '%'))
}
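// Worked example (illustrative): Percent(12345) encodes 12.345%, so
// Percent(12345).Float() == 12.345 and Percent(12345).String() == "12.345%".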
// MinInt ...
func MinInt(a, b int) int {
if a < b {
return a
}
return b
}
// MaxInt ...
func MaxInt(a, b int) int {
if a > b {
return a
}
return b
}
// RandIntn ...
func RandIntn(n int) int {
if n <= 0 {
panic("invalid argument to Intn")
}
if n <= 1<<31-1 {
randMux.Lock()
i32 := randgen.Int31n(int32(n))
randMux.Unlock()
return int(i32)
}
randMux.Lock()
i64 := randgen.Int63n(int64(n))
randMux.Unlock()
return int(i64)
}
// RandUint32 ...
func RandUint32() uint32 {
randMux.Lock()
u32 := randgen.Uint32()
randMux.Unlock()
return u32
}
// RandInt63n ...
func RandInt63n(n int64) int64 {
randMux.Lock()
i64 := randgen.Int63n(n)
randMux.Unlock()
return i64
}
// PanicSanity ...
func PanicSanity(v interface{}) {
panic(Fmt("Panicked on a Sanity Check: %v", v))
}
// PanicCrisis ...
func PanicCrisis(v interface{}) {
panic(Fmt("Panicked on a Crisis: %v", v))
}
// PanicQ ...
func PanicQ(v interface{}) {
panic(Fmt("Panicked questionably: %v", v))
}
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types
import (
"bytes"
"sort"
"strings"
"github.com/33cn/chain33/common/crypto"
"github.com/33cn/chain33/common/log/log15"
"github.com/33cn/chain33/common/merkle"
)
var validatorsetlog = log15.New("module", "dpos-val")
// Validator ...
type Validator struct {
Address []byte `json:"address"`
PubKey []byte `json:"pub_key"`
}
// NewValidator ...
func NewValidator(pubKey crypto.PubKey) *Validator {
return &Validator{
Address: GenAddressByPubKey(pubKey),
PubKey: pubKey.Bytes(),
}
}
// Copy creates a new copy of the validator.
// Panics if the validator is nil.
func (v *Validator) Copy() *Validator {
vCopy := *v
return &vCopy
}
func (v *Validator) String() string {
if v == nil {
return "nil-Validator"
}
return Fmt("Validator{%v %v}",
v.Address,
v.PubKey)
}
// Hash computes the unique ID of a validator from its address and pubkey.
func (v *Validator) Hash() []byte {
hashBytes := v.Address
hashBytes = append(hashBytes, v.PubKey...)
return crypto.Ripemd160(hashBytes)
}
// ValidatorSet represents a set of *Validator at a given height.
// The validators can be fetched by address or index.
// The index is in order of .Address, so the indices are fixed
// for all rounds of a given blockchain height.
// NOTE: Not goroutine-safe.
// NOTE: All get/set to validators should copy the value for safety.
type ValidatorSet struct {
// NOTE: persisted via reflect, must be exported.
Validators []*Validator `json:"validators"`
}
// NewValidatorSet ...
func NewValidatorSet(vals []*Validator) *ValidatorSet {
validators := make([]*Validator, len(vals))
for i, val := range vals {
validators[i] = val.Copy()
}
sort.Sort(ValidatorsByAddress(validators))
vs := &ValidatorSet{
Validators: validators,
}
return vs
}
// Copy ...
func (valSet *ValidatorSet) Copy() *ValidatorSet {
validators := make([]*Validator, len(valSet.Validators))
for i, val := range valSet.Validators {
// NOTE: must copy, since IncrementAccum updates in place.
validators[i] = val.Copy()
}
return &ValidatorSet{
Validators: validators,
}
}
// HasAddress ...
func (valSet *ValidatorSet) HasAddress(address []byte) bool {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
return idx != len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address)
}
// GetByAddress ...
func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
if idx != len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) {
return idx, valSet.Validators[idx].Copy()
}
return -1, nil
}
// GetByIndex returns the validator by index.
// It returns nil values if index < 0 or
// index >= len(ValidatorSet.Validators)
func (valSet *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) {
if index < 0 || index >= len(valSet.Validators) {
return nil, nil
}
val = valSet.Validators[index]
return val.Address, val.Copy()
}
// Size ...
func (valSet *ValidatorSet) Size() int {
return len(valSet.Validators)
}
// Hash ...
func (valSet *ValidatorSet) Hash() []byte {
if len(valSet.Validators) == 0 {
return nil
}
hashables := make([][]byte, len(valSet.Validators))
for i, val := range valSet.Validators {
hashables[i] = val.Hash()
}
return merkle.GetMerkleRoot(hashables)
}
// Add ...
func (valSet *ValidatorSet) Add(val *Validator) (added bool) {
val = val.Copy()
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(val.Address, valSet.Validators[i].Address) <= 0
})
if idx == len(valSet.Validators) {
valSet.Validators = append(valSet.Validators, val)
return true
} else if bytes.Equal(valSet.Validators[idx].Address, val.Address) {
return false
} else {
newValidators := make([]*Validator, len(valSet.Validators)+1)
copy(newValidators[:idx], valSet.Validators[:idx])
newValidators[idx] = val
copy(newValidators[idx+1:], valSet.Validators[idx:])
valSet.Validators = newValidators
return true
}
}
// Update ...
func (valSet *ValidatorSet) Update(val *Validator) (updated bool) {
index, sameVal := valSet.GetByAddress(val.Address)
if sameVal == nil {
return false
}
valSet.Validators[index] = val.Copy()
return true
}
// Remove ...
func (valSet *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
if idx == len(valSet.Validators) || !bytes.Equal(valSet.Validators[idx].Address, address) {
return nil, false
}
removedVal := valSet.Validators[idx]
newValidators := valSet.Validators[:idx]
if idx+1 < len(valSet.Validators) {
newValidators = append(newValidators, valSet.Validators[idx+1:]...)
}
valSet.Validators = newValidators
return removedVal, true
}
// Iterate ...
func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) {
for i, val := range valSet.Validators {
stop := fn(i, val.Copy())
if stop {
break
}
}
}
func (valSet *ValidatorSet) String() string {
return valSet.StringIndented("")
}
// StringIndented ...
func (valSet *ValidatorSet) StringIndented(indent string) string {
if valSet == nil {
return "nil-ValidatorSet"
}
valStrings := []string{}
valSet.Iterate(func(index int, val *Validator) bool {
valStrings = append(valStrings, val.String())
return false
})
return Fmt(`ValidatorSet{
%s Validators:
%s %v
%s}`,
indent,
indent, strings.Join(valStrings, "\n"+indent+" "),
indent)
}
// Implements sort for sorting validators by address.
// ValidatorsByAddress ...
type ValidatorsByAddress []*Validator
func (vs ValidatorsByAddress) Len() int {
return len(vs)
}
func (vs ValidatorsByAddress) Less(i, j int) bool {
return bytes.Compare(vs[i].Address, vs[j].Address) == -1
}
func (vs ValidatorsByAddress) Swap(i, j int) {
it := vs[i]
vs[i] = vs[j]
vs[j] = it
}
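A brief sketch (not part of this commit) of building and querying a validator set with the helpers above; it assumes ConsensusCrypto is initialized so that NewValidator can derive addresses from the pubkeys.
// Illustrative sketch only, written as if in this package.
func exampleValidatorSet(pubKeys []crypto.PubKey) *ValidatorSet {
	vals := make([]*Validator, len(pubKeys))
	for i, pk := range pubKeys {
		vals[i] = NewValidator(pk)
	}
	valSet := NewValidatorSet(vals) // NewValidatorSet copies and sorts by address
	if len(vals) > 0 {
		if idx, val := valSet.GetByAddress(vals[0].Address); val != nil {
			validatorsetlog.Info("found validator", "index", idx, "addr", val.Address)
		}
	}
	return valSet
}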
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dpos
import (
"bytes"
"encoding/json"
"fmt"
ttypes "github.com/33cn/plugin/plugin/consensus/dpos/types"
"math/rand"
)
const fee = 1e6
var (
r *rand.Rand
)
// ValidatorMgr keeps the validator information needed by the DPoS consensus:
// the chain ID, the current validator set and the latest app hash.
// All fields are exposed so the struct can be easily serialized,
// but none of them should be mutated directly; use Copy() instead.
// NOTE: not goroutine-safe.
type ValidatorMgr struct {
// Immutable
ChainID string
// Validators are persisted to the database separately every time they change,
// so we can query for historical validator sets.
// Note that if s.LastBlockHeight causes a valset change,
// we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1
Validators *ttypes.ValidatorSet
// The latest AppHash we've received from calling abci.Commit()
AppHash []byte
}
// Copy makes a copy of the ValidatorMgr for mutating.
func (s ValidatorMgr) Copy() ValidatorMgr {
return ValidatorMgr{
ChainID: s.ChainID,
Validators: s.Validators.Copy(),
AppHash: s.AppHash,
}
}
// Equals returns true if the two ValidatorMgrs are identical.
func (s ValidatorMgr) Equals(s2 ValidatorMgr) bool {
return bytes.Equal(s.Bytes(), s2.Bytes())
}
// Bytes serializes the ValidatorMgr as JSON.
func (s ValidatorMgr) Bytes() []byte {
sbytes, err := json.Marshal(s)
if err != nil {
fmt.Printf("Error reading GenesisDoc: %v", err)
return nil
}
return sbytes
}
// IsEmpty returns true if the ValidatorMgr is equal to the empty ValidatorMgr.
func (s ValidatorMgr) IsEmpty() bool {
return s.Validators == nil // XXX can't compare to Empty
}
// GetValidators returns the current validator set.
func (s ValidatorMgr) GetValidators() (current *ttypes.ValidatorSet) {
return s.Validators
}
// MakeGenesisValidatorMgr creates a ValidatorMgr from a ttypes.GenesisDoc.
func MakeGenesisValidatorMgr(genDoc *ttypes.GenesisDoc) (ValidatorMgr, error) {
err := genDoc.ValidateAndComplete()
if err != nil {
return ValidatorMgr{}, fmt.Errorf("Error in genesis file: %v", err)
}
// Make validators slice
validators := make([]*ttypes.Validator, len(genDoc.Validators))
for i, val := range genDoc.Validators {
pubKey, err := ttypes.PubKeyFromString(val.PubKey.Data)
if err != nil {
return ValidatorMgr{}, fmt.Errorf("Error validate[%v] in genesis file: %v", i, err)
}
// Make validator
validators[i] = &ttypes.Validator{
Address: ttypes.GenAddressByPubKey(pubKey),
PubKey: pubKey.Bytes(),
}
}
return ValidatorMgr{
ChainID: genDoc.ChainID,
Validators: ttypes.NewValidatorSet(validators),
AppHash: genDoc.AppHash,
}, nil
}
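A short sketch (not part of this commit) tying the pieces together: building the ValidatorMgr above from a genesis file; the path is hypothetical and ttypes.ConsensusCrypto must already be initialized.
// Illustrative sketch only, written as if in package dpos.
func exampleGenesisValidatorMgr() (ValidatorMgr, error) {
	genDoc, err := ttypes.GenesisDocFromFile("genesis.json") // hypothetical path
	if err != nil {
		return ValidatorMgr{}, err
	}
	mgr, err := MakeGenesisValidatorMgr(genDoc)
	if err != nil {
		return ValidatorMgr{}, err
	}
	// The chain ID and validator set can now be used to verify votes and notifies.
	fmt.Printf("chainID=%s validators=%d\n", mgr.ChainID, mgr.Validators.Size())
	return mgr, nil
}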
package init
import (
_ "github.com/33cn/plugin/plugin/consensus/dpos" //auto gen
_ "github.com/33cn/plugin/plugin/consensus/para" //auto gen
_ "github.com/33cn/plugin/plugin/consensus/pbft" //auto gen
_ "github.com/33cn/plugin/plugin/consensus/raft" //auto gen
......
......@@ -36,7 +36,7 @@ var r *rand.Rand
func createConn(ip string) {
var err error
-url := ip + ":8802"
+url := ip + ":9802"
fmt.Println("grpc url:", url)
conn, err = grpc.Dial(url, grpc.WithInsecure())
if err != nil {
......
......@@ -139,7 +139,7 @@ func Put(ip string, size string, privkey string) {
fmt.Fprintln(os.Stderr, err)
return
}
url := "http://" + ip + ":8801"
url := "http://" + ip + ":9801"
if privkey == "" {
_, priv := genaddress()
privkey = common.ToHex(priv.Bytes())
......
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: guess.proto
+// source: dpos_msg.proto
package types
......@@ -2059,7 +2059,7 @@ func init() {
proto.RegisterType((*GuessGameRecords)(nil), "types.GuessGameRecords")
}
-func init() { proto.RegisterFile("guess.proto", fileDescriptor_7574406c5d3430e8) }
+func init() { proto.RegisterFile("dpos_msg.proto", fileDescriptor_7574406c5d3430e8) }
var fileDescriptor_7574406c5d3430e8 = []byte{
// 1367 bytes of a gzipped FileDescriptorProto
......@@ -2362,5 +2362,5 @@ var _Guess_serviceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
Metadata: "guess.proto",
Metadata: "dpos_msg.proto",
}