Commit c48b4c11 authored by 张振华

Merge branch 'dpos' of https://github.com/zzh33cn/plugin into store-perf-test

parents d25ddf55 36aeaf3d
Title="local"
[log]
# Log level; supports debug(dbug)/info/warn/error(eror)/crit
loglevel = "debug"
logConsoleLevel = "info"
# Log file name; may include a directory, and all generated log files are placed under it
logFile = "logs/chain33.log"
# Maximum size of a single log file (unit: MB)
maxFileSize = 300
# Maximum number of historical log files to keep
maxBackups = 100
# Maximum age of historical log files to keep (unit: days)
maxAge = 28
# Whether log file names use local time (otherwise UTC is used)
localTime = true
# Whether historical log files are compressed (gz format)
compress = true
# Whether to print the calling source file and line number
callerFile = false
# Whether to print the calling function
callerFunction = false
[blockchain]
defCacheSize=512
maxFetchBlockNum=128
timeoutSeconds=5
batchBlockNum=128
driver="leveldb"
dbPath="datadir"
dbCache=64
isStrongConsistency=true
singleMode=true
batchsync=false
enableTxQuickIndex=true
[p2p]
seeds=["127.0.0.1:13802"]
enable=true
isSeed=true
serverStart=true
innerSeedEnable=false
useGithub=false
innerBounds=300
msgCacheSize=10240
driver="leveldb"
dbPath="datadir/addrbook"
dbCache=4
grpcLogFile="grpc33.log"
version=199
verMix=199
verMax=199
[rpc]
jrpcBindAddr="localhost:8801"
grpcBindAddr="localhost:8802"
whitelist=["127.0.0.1"]
jrpcFuncWhitelist=["*"]
grpcFuncWhitelist=["*"]
[mempool]
name="timeline"
poolCacheSize=10240
minTxFee=100000
[consensus]
name="tendermint"
minerstart=false
[mver.consensus]
fundKeyAddr = "1BQXS6TxaYYG5mADaWij4AxhZZUTpw95a5"
coinReward = 18
coinDevFund = 12
ticketPrice = 10000
powLimitBits = "0x1f00ffff"
retargetAdjustmentFactor = 4
futureBlockTime = 16
ticketFrozenTime = 5 #5s only for test
ticketWithdrawTime = 10 #10s only for test
ticketMinerWaitTime = 2 #2s only for test
maxTxNumber = 1600 #160
targetTimespan = 2304
targetTimePerBlock = 16
[mver.consensus.ForkChainParamV1]
maxTxNumber = 10000
targetTimespan = 288 #only for test
targetTimePerBlock = 2
[mver.consensus.ForkChainParamV2]
powLimitBits = "0x1f2fffff"
[consensus.sub.dpos]
genesis="14KEKbYtKKQm4wMthSK9J4La4nAiidGozt"
genesisBlockTime=1514533394
timeoutCheckConnections=1000
timeoutVoting=3000
timeoutWaitNotify=2000
createEmptyBlocks=false
createEmptyBlocksInterval=0
validatorNodes=["127.0.0.1:46656"]
blockInterval=3
continueBlockNum=12
isValidator=false
[store]
name="kvdb"
driver="leveldb"
dbPath="datadir/mavltree"
dbCache=128
[store.sub.kvdb]
enableMavlPrefix=false
enableMVCC=false
[wallet]
minFee=100000
driver="leveldb"
dbPath="wallet"
dbCache=16
signType="secp256k1"
[wallet.sub.ticket]
minerdisable=false
minerwhitelist=["*"]
[exec]
isFree=false
minExecFee=100000
enableStat=false
enableMVCC=false
alias=["token1:token","token2:token","token3:token"]
saveTokenTxList=false
[exec.sub.cert]
# Whether to enable certificate verification and signing
enable=false
# Path to the crypto files
cryptoPath="authdir/crypto"
# Certificate signature type; supports "auth_ecdsa", "auth_sm2"
signType="auth_ecdsa"
{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"test-chain-Ep9EcD","validators":[{"pub_key":{"type":"ed25519","data":"220ACBE680DF2473A0CB48987A00FCC1812F106A7390BE6B8E2D31122C992A19"},"name":""}],"app_hash":""}
{"address":"02A13174B92727C4902DB099E51A3339F48BD45E","pub_key":{"type":"ed25519","data":"220ACBE680DF2473A0CB48987A00FCC1812F106A7390BE6B8E2D31122C992A19"},"priv_key":{"type":"ed25519","data":"B3DC4C0725884EBB7264B92F1D8D37584A64ADE1799D997EC64B4FE3973E08DE220ACBE680DF2473A0CB48987A00FCC1812F106A7390BE6B8E2D31122C992A19"}}
\ No newline at end of file
all:
sh ./create_protobuf.sh
#!/bin/sh
protoc --go_out=plugins=grpc:../types ./*.proto --proto_path=. --proto_path="../types/"
syntax = "proto3";
package types;
message VoteItem {
int32 votedNodeIndex = 1; // index of the node being voted for
bytes votedNodeAddress = 2; // address of the node being voted for
int64 cycleStart = 3; // start time of the cycle
int64 cycleStop = 4; // end time of the cycle
int64 periodStart = 5; // start time of the new node's block-producing period
int64 periodStop = 6; // end time of the new node's block-producing period
int64 height = 7; // starting height of the new node's block production
bytes voteID = 8; // vote ID
}
//DPosVote is a node's vote in DPoS consensus, used to reach agreement.
message DPosVote {
VoteItem voteItem = 1;
int64 voteTimestamp = 2; // time the vote was cast
int32 voterNodeIndex = 3; // index of the voting node
bytes voterNodeAddress = 4; // address of the voting node
bytes signature = 5; // voter's signature
}
message DPosVoteReply {
DPosVote vote = 1;
}
//DPosNotify is sent when a delegate node's block-producing period ends, notifying other nodes to confirm the height and vote for the next node.
message DPosNotify {
VoteItem vote = 1;
int64 heightStop = 2; // final height of the new node's block-producing period
bytes hashStop = 3; // hash of the final block of the new node's block-producing period
int64 notifyTimestamp = 4; // time the notify was issued
int32 notifyNodeIndex = 5; // index of the notifying node
bytes notifyNodeAddress= 6; // address of the notifying node
bytes signature = 7; // notifying node's signature
}
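A rough sketch of building and serializing one of these messages from Go, using the generated types included later in this commit; the field values here are placeholders.

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/proto"

	"github.com/33cn/plugin/plugin/consensus/dpos/types"
)

func main() {
	now := time.Now().Unix()
	vote := &types.DPosVote{
		VoteItem: &types.VoteItem{
			VotedNodeIndex: 0,
			CycleStart:     now,
			CycleStop:      now + 3*3*12, // delegateNum * blockInterval * continueBlockNum, placeholder values
			Height:         100,
		},
		VoteTimestamp:  time.Now().UnixNano(),
		VoterNodeIndex: 1,
	}
	data, err := proto.Marshal(vote)
	if err != nil {
		panic(err)
	}
	var decoded types.DPosVote
	if err := proto.Unmarshal(data, &decoded); err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes, round-trip height=%d\n", len(data), decoded.GetVoteItem().GetHeight())
}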
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package dpos Uses nacl's secret_box to encrypt a net.Conn.
// It is (meant to be) an implementation of the STS protocol.
// Note we do not (yet) assume that a remote peer's pubkey
// is known ahead of time, and thus we are technically
// still vulnerable to MITM. (TODO!)
// See docs/sts-final.pdf for more info
package dpos
import (
"bytes"
crand "crypto/rand"
"crypto/sha256"
"encoding/binary"
"errors"
"io"
"net"
"time"
"github.com/33cn/chain33/common/crypto"
"github.com/33cn/plugin/plugin/consensus/dpos/types"
"golang.org/x/crypto/nacl/box"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/ripemd160"
)
// 2 + 1024 == 1026 total frame size
const (
dataLenSize = 2 // uint16 to describe the length, is <= dataMaxSize
dataMaxSize = 1024
totalFrameSize = dataMaxSize + dataLenSize
sealedFrameSize = totalFrameSize + secretbox.Overhead
authSigMsgSize = (32) + (64)
) // fixed size (length prefixed) byte arrays
// SecretConnection Implements net.Conn
type SecretConnection struct {
conn io.ReadWriteCloser
recvBuffer []byte
recvNonce *[24]byte
sendNonce *[24]byte
remPubKey crypto.PubKey
shrSecret *[32]byte // shared secret
}
// MakeSecretConnection Performs handshake and returns a new authenticated SecretConnection.
// Returns nil if error in handshake.
// Caller should call conn.Close()
// See docs/sts-final.pdf for more information.
func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (*SecretConnection, error) {
locPubKey := locPrivKey.PubKey()
// Generate ephemeral keys for perfect forward secrecy.
locEphPub, locEphPriv := genEphKeys()
// Write local ephemeral pubkey and receive one too.
// NOTE: every 32-byte string is accepted as a Curve25519 public key
// (see DJB's Curve25519 paper: http://cr.yp.to/ecdh/curve25519-20060209.pdf)
remEphPub, err := shareEphPubKey(conn, locEphPub)
if err != nil {
return nil, err
}
// Compute common shared secret.
shrSecret := computeSharedSecret(remEphPub, locEphPriv)
// Sort by lexical order.
loEphPub, hiEphPub := sort32(locEphPub, remEphPub)
// Check if the local ephemeral public key
// was the least, lexicographically sorted.
locIsLeast := bytes.Equal(locEphPub[:], loEphPub[:])
// Generate nonces to use for secretbox.
recvNonce, sendNonce := genNonces(loEphPub, hiEphPub, locIsLeast)
// Generate common challenge to sign.
challenge := genChallenge(loEphPub, hiEphPub)
// Construct SecretConnection.
sc := &SecretConnection{
conn: conn,
recvBuffer: nil,
recvNonce: recvNonce,
sendNonce: sendNonce,
shrSecret: shrSecret,
}
// Sign the challenge bytes for authentication.
locSignature := signChallenge(challenge, locPrivKey)
// Share (in secret) each other's pubkey & challenge signature
authSigMsg, err := shareAuthSignature(sc, locPubKey, locSignature)
if err != nil {
return nil, err
}
remPubKey, remSignature := authSigMsg.Key, authSigMsg.Sig
if !remPubKey.VerifyBytes(challenge[:], remSignature) {
return nil, errors.New("Challenge verification failed")
}
// We've authorized.
sc.remPubKey = remPubKey
return sc, nil
}
// RemotePubKey Returns authenticated remote pubkey
func (sc *SecretConnection) RemotePubKey() crypto.PubKey {
return sc.remPubKey
}
// Writes encrypted frames of `sealedFrameSize`
// CONTRACT: data smaller than dataMaxSize is read atomically.
func (sc *SecretConnection) Write(data []byte) (n int, err error) {
for 0 < len(data) {
var frame = make([]byte, totalFrameSize)
var chunk []byte
if dataMaxSize < len(data) {
chunk = data[:dataMaxSize]
data = data[dataMaxSize:]
} else {
chunk = data
data = nil
}
chunkLength := len(chunk)
binary.BigEndian.PutUint16(frame, uint16(chunkLength))
copy(frame[dataLenSize:], chunk)
// encrypt the frame
var sealedFrame = make([]byte, sealedFrameSize)
secretbox.Seal(sealedFrame[:0], frame, sc.sendNonce, sc.shrSecret)
// fmt.Printf("secretbox.Seal(sealed:%X,sendNonce:%X,shrSecret:%X\n", sealedFrame, sc.sendNonce, sc.shrSecret)
incr2Nonce(sc.sendNonce)
// end encryption
_, err := sc.conn.Write(sealedFrame)
if err != nil {
return n, err
}
n += len(chunk)
}
return
}
// CONTRACT: data smaller than dataMaxSize is read atomically.
func (sc *SecretConnection) Read(data []byte) (n int, err error) {
if 0 < len(sc.recvBuffer) {
n = copy(data, sc.recvBuffer)
sc.recvBuffer = sc.recvBuffer[n:]
return
}
sealedFrame := make([]byte, sealedFrameSize)
_, err = io.ReadFull(sc.conn, sealedFrame)
if err != nil {
return
}
// decrypt the frame
var frame = make([]byte, totalFrameSize)
// fmt.Printf("secretbox.Open(sealed:%X,recvNonce:%X,shrSecret:%X\n", sealedFrame, sc.recvNonce, sc.shrSecret)
_, ok := secretbox.Open(frame[:0], sealedFrame, sc.recvNonce, sc.shrSecret)
if !ok {
return n, errors.New("Failed to decrypt SecretConnection")
}
incr2Nonce(sc.recvNonce)
// end decryption
var chunkLength = binary.BigEndian.Uint16(frame) // read the first two bytes
if chunkLength > dataMaxSize {
return 0, errors.New("chunkLength is greater than dataMaxSize")
}
var chunk = frame[dataLenSize : dataLenSize+chunkLength]
n = copy(data, chunk)
sc.recvBuffer = chunk[n:]
return
}
// Close Implements net.Conn
func (sc *SecretConnection) Close() error { return sc.conn.Close() }
// LocalAddr ...
func (sc *SecretConnection) LocalAddr() net.Addr { return sc.conn.(net.Conn).LocalAddr() }
// RemoteAddr ...
func (sc *SecretConnection) RemoteAddr() net.Addr { return sc.conn.(net.Conn).RemoteAddr() }
// SetDeadline ...
func (sc *SecretConnection) SetDeadline(t time.Time) error { return sc.conn.(net.Conn).SetDeadline(t) }
// SetReadDeadline ...
func (sc *SecretConnection) SetReadDeadline(t time.Time) error {
return sc.conn.(net.Conn).SetReadDeadline(t)
}
// SetWriteDeadline ...
func (sc *SecretConnection) SetWriteDeadline(t time.Time) error {
return sc.conn.(net.Conn).SetWriteDeadline(t)
}
func genEphKeys() (ephPub, ephPriv *[32]byte) {
var err error
ephPub, ephPriv, err = box.GenerateKey(crand.Reader)
if err != nil {
types.PanicCrisis("Could not generate ephemeral keypairs")
}
return
}
func shareEphPubKey(conn io.ReadWriter, locEphPub *[32]byte) (remEphPub *[32]byte, err error) {
var err1, err2 error
Parallel(
func() {
_, err1 = conn.Write(locEphPub[:])
},
func() {
remEphPub = new([32]byte)
_, err2 = io.ReadFull(conn, remEphPub[:])
},
)
if err1 != nil {
return nil, err1
}
if err2 != nil {
return nil, err2
}
return remEphPub, nil
}
func computeSharedSecret(remPubKey, locPrivKey *[32]byte) (shrSecret *[32]byte) {
shrSecret = new([32]byte)
box.Precompute(shrSecret, remPubKey, locPrivKey)
return
}
func sort32(foo, bar *[32]byte) (lo, hi *[32]byte) {
if bytes.Compare(foo[:], bar[:]) < 0 {
lo = foo
hi = bar
} else {
lo = bar
hi = foo
}
return
}
func genNonces(loPubKey, hiPubKey *[32]byte, locIsLo bool) (recvNonce, sendNonce *[24]byte) {
nonce1 := hash24(append(loPubKey[:], hiPubKey[:]...))
nonce2 := new([24]byte)
copy(nonce2[:], nonce1[:])
nonce2[len(nonce2)-1] ^= 0x01
if locIsLo {
recvNonce = nonce1
sendNonce = nonce2
} else {
recvNonce = nonce2
sendNonce = nonce1
}
return
}
func genChallenge(loPubKey, hiPubKey *[32]byte) (challenge *[32]byte) {
return hash32(append(loPubKey[:], hiPubKey[:]...))
}
func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKey) (signature crypto.Signature) {
signature = locPrivKey.Sign(challenge[:])
return
}
type authSigMessage struct {
Key crypto.PubKey
Sig crypto.Signature
}
func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature crypto.Signature) (*authSigMessage, error) {
var recvMsg authSigMessage
var err1, err2 error
Parallel(
func() {
msgByte := make([]byte, len(pubKey.Bytes())+len(signature.Bytes()))
copy(msgByte, pubKey.Bytes())
copy(msgByte[len(pubKey.Bytes()):], signature.Bytes())
_, err1 = sc.Write(msgByte)
},
func() {
readBuffer := make([]byte, authSigMsgSize)
_, err2 = io.ReadFull(sc, readBuffer)
if err2 != nil {
return
}
//n := int(0) // not used.
//recvMsg = wire.ReadBinary(authSigMessage{}, bytes.NewBuffer(readBuffer), authSigMsgSize, &n, &err2).(authSigMessage)
//secret.Info("shareAuthSignature", "readBuffer", readBuffer)
recvMsg.Key, err2 = types.ConsensusCrypto.PubKeyFromBytes(readBuffer[:32])
if err2 != nil {
return
}
recvMsg.Sig, err2 = types.ConsensusCrypto.SignatureFromBytes(readBuffer[32:])
if err2 != nil {
return
}
})
if err1 != nil {
return nil, err1
}
if err2 != nil {
return nil, err2
}
return &recvMsg, nil
}
//--------------------------------------------------------------------------------
// sha256
func hash32(input []byte) (res *[32]byte) {
hasher := sha256.New()
_, err := hasher.Write(input) // nolint: errcheck, gas
if err != nil {
panic(err)
}
resSlice := hasher.Sum(nil)
res = new([32]byte)
copy(res[:], resSlice)
return
}
// We only fill in the first 20 bytes with ripemd160
func hash24(input []byte) (res *[24]byte) {
hasher := ripemd160.New()
_, err := hasher.Write(input) // nolint: errcheck, gas
if err != nil {
panic(err)
}
resSlice := hasher.Sum(nil)
res = new([24]byte)
copy(res[:], resSlice)
return
}
// increment nonce big-endian by 2 with wraparound.
func incr2Nonce(nonce *[24]byte) {
incrNonce(nonce)
incrNonce(nonce)
}
// increment nonce big-endian by 1 with wraparound.
func incrNonce(nonce *[24]byte) {
for i := 23; 0 <= i; i-- {
nonce[i]++
if nonce[i] != 0 {
return
}
}
}
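A minimal sketch of how a caller in this package might upgrade an outbound TCP connection with the handshake above; the helper name, the dial timeout, and the way the private key is supplied are assumptions, not part of this commit.

// dialSecret is illustrative only: it dials a peer and wraps the raw TCP
// connection in a SecretConnection before any plaintext is exchanged.
func dialSecret(addr string, locPrivKey crypto.PrivKey) (*SecretConnection, error) {
	conn, err := net.DialTimeout("tcp", addr, 3*time.Second)
	if err != nil {
		return nil, err
	}
	sc, err := MakeSecretConnection(conn, locPrivKey)
	if err != nil {
		conn.Close() // handshake failed; the raw connection is unusable
		return nil, err
	}
	// From here on every Write/Read is framed and sealed with secretbox.
	if _, err := sc.Write([]byte("ping")); err != nil {
		sc.Close()
		return nil, err
	}
	return sc, nil
}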
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dpos
import (
"fmt"
"testing"
"time"
_ "github.com/33cn/chain33/system"
_ "github.com/33cn/plugin/plugin/dapp/init"
_ "github.com/33cn/plugin/plugin/store/init"
"github.com/stretchr/testify/assert"
)
func init() {
setParams(3, 3, 6)
}
func setParams(delegateNum int64, blockInterval int64, continueBlockNum int64) {
dposDelegateNum = delegateNum //number of delegate nodes; read from config, may later be determined by voting results
dposBlockInterval = blockInterval //block interval, currently 3s
dposContinueBlockNum = continueBlockNum //number of consecutive blocks a delegate produces once elected
dposCycle = int64(dposDelegateNum * dposBlockInterval * dposContinueBlockNum)
dposPeriod = int64(dposBlockInterval * dposContinueBlockNum)
}
func printTask(now int64, task *Task) {
fmt.Printf("now:%v|cycleStart:%v|cycleStop:%v|periodStart:%v|periodStop:%v|blockStart:%v|blockStop:%v|nodeId:%v\n",
now,
task.cycleStart,
task.cycleStop,
task.periodStart,
task.periodStop,
task.blockStart,
task.blockStop,
task.nodeID)
}
func assertTask(task *Task, t *testing.T) {
assert.Equal(t, true, task.nodeID >= 0 && task.nodeID < dposDelegateNum)
assert.Equal(t, true, task.cycleStart <= task.periodStart && task.periodStart <= task.blockStart && task.blockStop <= task.periodStop && task.periodStop <= task.cycleStop)
}
func TestDecideTaskByTime(t *testing.T) {
now := time.Now().Unix()
task := DecideTaskByTime(now)
printTask(now, &task)
assertTask(&task, t)
setParams(2, 1, 6)
now = time.Now().Unix()
task = DecideTaskByTime(now)
printTask(now, &task)
assertTask(&task, t)
setParams(21, 1, 12)
now = time.Now().Unix()
task = DecideTaskByTime(now)
printTask(now, &task)
assertTask(&task, t)
setParams(21, 2, 12)
now = time.Now().Unix()
task = DecideTaskByTime(now)
printTask(now, &task)
assertTask(&task, t)
setParams(2, 3, 12)
for i := 0; i < 120; i++ {
now = time.Now().Unix()
task = DecideTaskByTime(now)
printTask(now, &task)
assertTask(&task, t)
time.Sleep(time.Second * 1)
}
}
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dpos
import (
"time"
)
var (
tickTockBufferSize = 10
)
// TimeoutTicker is a timer that schedules timeouts
// conditional on the height/round/step in the timeoutInfo.
// The timeoutInfo.Duration may be non-positive.
type TimeoutTicker interface {
Start()
Stop()
Chan() <-chan timeoutInfo // on which to receive a timeout
ScheduleTimeout(ti timeoutInfo) // reset the timer
}
// timeoutTicker wraps time.Timer,
// scheduling timeouts only for greater height/round/step
// than what it's already seen.
// Timeouts are scheduled along the tickChan,
// and fired on the tockChan.
type timeoutTicker struct {
timer *time.Timer
tickChan chan timeoutInfo // for scheduling timeouts
tockChan chan timeoutInfo // for notifying about them
}
// NewTimeoutTicker returns a new TimeoutTicker.
func NewTimeoutTicker() TimeoutTicker {
tt := &timeoutTicker{
timer: time.NewTimer(0),
tickChan: make(chan timeoutInfo, tickTockBufferSize),
tockChan: make(chan timeoutInfo, tickTockBufferSize),
}
tt.stopTimer() // don't want to fire until the first scheduled timeout
return tt
}
// Start implements TimeoutTicker. It starts the timeout routine.
func (t *timeoutTicker) Start() {
go t.timeoutRoutine()
}
// Stop implements TimeoutTicker. It stops the timeout routine.
func (t *timeoutTicker) Stop() {
t.stopTimer()
}
// Chan returns a channel on which timeouts are sent.
func (t *timeoutTicker) Chan() <-chan timeoutInfo {
return t.tockChan
}
// ScheduleTimeout schedules a new timeout by sending on the internal tickChan.
// The timeoutRoutine is always available to read from tickChan, so this won't block.
// The scheduling may fail if the timeoutRoutine has already scheduled a timeout for a later height/round/step.
func (t *timeoutTicker) ScheduleTimeout(ti timeoutInfo) {
t.tickChan <- ti
}
//-------------------------------------------------------------
// stop the timer and drain if necessary
func (t *timeoutTicker) stopTimer() {
// Stop() returns false if it was already fired or was stopped
if !t.timer.Stop() {
select {
case <-t.timer.C:
default:
dposlog.Debug("Timer already stopped")
}
}
}
// send on tickChan to start a new timer.
// timers are interrupted and replaced by new ticks from later steps
// timeouts of 0 on the tickChan will be immediately relayed to the tockChan
func (t *timeoutTicker) timeoutRoutine() {
dposlog.Debug("Starting timeout routine")
var ti timeoutInfo
for {
select {
case newti := <-t.tickChan:
dposlog.Debug("Received tick", "old_ti", ti, "new_ti", newti)
// stop the last timer
t.stopTimer()
// update timeoutInfo and reset timer
// NOTE time.Timer allows duration to be non-positive
ti = newti
t.timer.Reset(ti.Duration)
dposlog.Debug("Scheduled timeout", "dur", ti.Duration)
case <-t.timer.C:
dposlog.Info("Timed out", "dur", ti.Duration, "state", StateTypeMapping[ti.State])
// go routine here guarantees timeoutRoutine doesn't block.
// Determinism comes from playback in the receiveRoutine.
// We can eliminate it by merging the timeoutRoutine into receiveRoutine
// and managing the timeouts ourselves with a millisecond ticker
go func(toi timeoutInfo) { t.tockChan <- toi }(ti)
}
}
}
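A hedged usage sketch of the ticker; the timeoutInfo value is assumed to be built by the caller with the Duration and State fields used above.

// runStepTimeout is illustrative only: it arms the ticker for one step and
// waits for it to fire.
func runStepTimeout(ticker TimeoutTicker, ti timeoutInfo) {
	ticker.Start()
	defer ticker.Stop()

	// Arm the timer; a later ScheduleTimeout would replace this one.
	ticker.ScheduleTimeout(ti)

	timeout := <-ticker.Chan() // delivered after ti.Duration elapses
	dposlog.Info("step expired", "dur", timeout.Duration)
}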
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: dpos_msg.proto
/*
Package types is a generated protocol buffer package.
It is generated from these files:
dpos_msg.proto
It has these top-level messages:
VoteItem
DPosVote
DPosVoteReply
DPosNotify
*/
package types
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type VoteItem struct {
VotedNodeIndex int32 `protobuf:"varint,1,opt,name=votedNodeIndex" json:"votedNodeIndex,omitempty"`
VotedNodeAddress []byte `protobuf:"bytes,2,opt,name=votedNodeAddress,proto3" json:"votedNodeAddress,omitempty"`
CycleStart int64 `protobuf:"varint,3,opt,name=cycleStart" json:"cycleStart,omitempty"`
CycleStop int64 `protobuf:"varint,4,opt,name=cycleStop" json:"cycleStop,omitempty"`
PeriodStart int64 `protobuf:"varint,5,opt,name=periodStart" json:"periodStart,omitempty"`
PeriodStop int64 `protobuf:"varint,6,opt,name=periodStop" json:"periodStop,omitempty"`
Height int64 `protobuf:"varint,7,opt,name=height" json:"height,omitempty"`
VoteID []byte `protobuf:"bytes,8,opt,name=voteID,proto3" json:"voteID,omitempty"`
}
func (m *VoteItem) Reset() { *m = VoteItem{} }
func (m *VoteItem) String() string { return proto.CompactTextString(m) }
func (*VoteItem) ProtoMessage() {}
func (*VoteItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *VoteItem) GetVotedNodeIndex() int32 {
if m != nil {
return m.VotedNodeIndex
}
return 0
}
func (m *VoteItem) GetVotedNodeAddress() []byte {
if m != nil {
return m.VotedNodeAddress
}
return nil
}
func (m *VoteItem) GetCycleStart() int64 {
if m != nil {
return m.CycleStart
}
return 0
}
func (m *VoteItem) GetCycleStop() int64 {
if m != nil {
return m.CycleStop
}
return 0
}
func (m *VoteItem) GetPeriodStart() int64 {
if m != nil {
return m.PeriodStart
}
return 0
}
func (m *VoteItem) GetPeriodStop() int64 {
if m != nil {
return m.PeriodStop
}
return 0
}
func (m *VoteItem) GetHeight() int64 {
if m != nil {
return m.Height
}
return 0
}
func (m *VoteItem) GetVoteID() []byte {
if m != nil {
return m.VoteID
}
return nil
}
// DPosVote is a node's vote in DPoS consensus, used to reach agreement.
type DPosVote struct {
VoteItem *VoteItem `protobuf:"bytes,1,opt,name=voteItem" json:"voteItem,omitempty"`
VoteTimestamp int64 `protobuf:"varint,2,opt,name=voteTimestamp" json:"voteTimestamp,omitempty"`
VoterNodeIndex int32 `protobuf:"varint,3,opt,name=voterNodeIndex" json:"voterNodeIndex,omitempty"`
VoterNodeAddress []byte `protobuf:"bytes,4,opt,name=voterNodeAddress,proto3" json:"voterNodeAddress,omitempty"`
Signature []byte `protobuf:"bytes,5,opt,name=signature,proto3" json:"signature,omitempty"`
}
func (m *DPosVote) Reset() { *m = DPosVote{} }
func (m *DPosVote) String() string { return proto.CompactTextString(m) }
func (*DPosVote) ProtoMessage() {}
func (*DPosVote) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *DPosVote) GetVoteItem() *VoteItem {
if m != nil {
return m.VoteItem
}
return nil
}
func (m *DPosVote) GetVoteTimestamp() int64 {
if m != nil {
return m.VoteTimestamp
}
return 0
}
func (m *DPosVote) GetVoterNodeIndex() int32 {
if m != nil {
return m.VoterNodeIndex
}
return 0
}
func (m *DPosVote) GetVoterNodeAddress() []byte {
if m != nil {
return m.VoterNodeAddress
}
return nil
}
func (m *DPosVote) GetSignature() []byte {
if m != nil {
return m.Signature
}
return nil
}
type DPosVoteReply struct {
Vote *DPosVote `protobuf:"bytes,1,opt,name=vote" json:"vote,omitempty"`
}
func (m *DPosVoteReply) Reset() { *m = DPosVoteReply{} }
func (m *DPosVoteReply) String() string { return proto.CompactTextString(m) }
func (*DPosVoteReply) ProtoMessage() {}
func (*DPosVoteReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *DPosVoteReply) GetVote() *DPosVote {
if m != nil {
return m.Vote
}
return nil
}
// DPosNotify is sent when a delegate node's block-producing period ends, notifying other nodes to confirm the height and vote for the next node.
type DPosNotify struct {
Vote *VoteItem `protobuf:"bytes,1,opt,name=vote" json:"vote,omitempty"`
HeightStop int64 `protobuf:"varint,2,opt,name=heightStop" json:"heightStop,omitempty"`
HashStop []byte `protobuf:"bytes,3,opt,name=hashStop,proto3" json:"hashStop,omitempty"`
NotifyTimestamp int64 `protobuf:"varint,4,opt,name=notifyTimestamp" json:"notifyTimestamp,omitempty"`
NotifyNodeIndex int32 `protobuf:"varint,5,opt,name=notifyNodeIndex" json:"notifyNodeIndex,omitempty"`
NotifyNodeAddress []byte `protobuf:"bytes,6,opt,name=notifyNodeAddress,proto3" json:"notifyNodeAddress,omitempty"`
Signature []byte `protobuf:"bytes,7,opt,name=signature,proto3" json:"signature,omitempty"`
}
func (m *DPosNotify) Reset() { *m = DPosNotify{} }
func (m *DPosNotify) String() string { return proto.CompactTextString(m) }
func (*DPosNotify) ProtoMessage() {}
func (*DPosNotify) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *DPosNotify) GetVote() *VoteItem {
if m != nil {
return m.Vote
}
return nil
}
func (m *DPosNotify) GetHeightStop() int64 {
if m != nil {
return m.HeightStop
}
return 0
}
func (m *DPosNotify) GetHashStop() []byte {
if m != nil {
return m.HashStop
}
return nil
}
func (m *DPosNotify) GetNotifyTimestamp() int64 {
if m != nil {
return m.NotifyTimestamp
}
return 0
}
func (m *DPosNotify) GetNotifyNodeIndex() int32 {
if m != nil {
return m.NotifyNodeIndex
}
return 0
}
func (m *DPosNotify) GetNotifyNodeAddress() []byte {
if m != nil {
return m.NotifyNodeAddress
}
return nil
}
func (m *DPosNotify) GetSignature() []byte {
if m != nil {
return m.Signature
}
return nil
}
func init() {
proto.RegisterType((*VoteItem)(nil), "types.VoteItem")
proto.RegisterType((*DPosVote)(nil), "types.DPosVote")
proto.RegisterType((*DPosVoteReply)(nil), "types.DPosVoteReply")
proto.RegisterType((*DPosNotify)(nil), "types.DPosNotify")
}
func init() { proto.RegisterFile("dpos_msg.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 385 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0xcd, 0x4a, 0xeb, 0x40,
0x14, 0xc7, 0x49, 0xd3, 0xa4, 0xb9, 0xa7, 0x5f, 0xf7, 0xce, 0xe2, 0x12, 0x2e, 0x17, 0x09, 0x51,
0x24, 0xa8, 0x74, 0xa1, 0xbe, 0x80, 0xd0, 0x4d, 0x37, 0x45, 0xa2, 0xb8, 0x95, 0xd8, 0x8c, 0x4d,
0xa0, 0xe9, 0x0c, 0x33, 0x63, 0xb1, 0x0f, 0xa1, 0xaf, 0xe6, 0x2b, 0xc9, 0x9c, 0x7c, 0x4c, 0x9a,
0xe2, 0xae, 0xe7, 0xf7, 0x9f, 0x8f, 0x73, 0x7e, 0xd3, 0xc0, 0x24, 0xe5, 0x4c, 0x3e, 0x17, 0x72,
0x3d, 0xe3, 0x82, 0x29, 0x46, 0x1c, 0xb5, 0xe7, 0x54, 0x86, 0x9f, 0x3d, 0xf0, 0x9e, 0x98, 0xa2,
0x0b, 0x45, 0x0b, 0x72, 0x0e, 0x93, 0x1d, 0x53, 0x34, 0x5d, 0xb2, 0x94, 0x2e, 0xb6, 0x29, 0x7d,
0xf7, 0xad, 0xc0, 0x8a, 0x9c, 0xb8, 0x43, 0xc9, 0x05, 0xfc, 0x6e, 0xc8, 0x5d, 0x9a, 0x0a, 0x2a,
0xa5, 0xdf, 0x0b, 0xac, 0x68, 0x14, 0x1f, 0x71, 0x72, 0x02, 0xb0, 0xda, 0xaf, 0x36, 0xf4, 0x41,
0x25, 0x42, 0xf9, 0x76, 0x60, 0x45, 0x76, 0xdc, 0x22, 0xe4, 0x3f, 0xfc, 0xaa, 0x2a, 0xc6, 0xfd,
0x3e, 0xc6, 0x06, 0x90, 0x00, 0x86, 0x9c, 0x8a, 0x9c, 0xa5, 0xe5, 0x76, 0x07, 0xf3, 0x36, 0xd2,
0xe7, 0xd7, 0x25, 0xe3, 0xbe, 0x5b, 0x9e, 0x6f, 0x08, 0xf9, 0x0b, 0x6e, 0x46, 0xf3, 0x75, 0xa6,
0xfc, 0x01, 0x66, 0x55, 0xa5, 0xb9, 0xee, 0x75, 0x31, 0xf7, 0x3d, 0xec, 0xbc, 0xaa, 0xc2, 0x2f,
0x0b, 0xbc, 0xf9, 0x3d, 0x93, 0x5a, 0x0a, 0xb9, 0x04, 0x6f, 0x57, 0xc9, 0x41, 0x15, 0xc3, 0xeb,
0xe9, 0x0c, 0xbd, 0xcd, 0x6a, 0x67, 0x71, 0xb3, 0x80, 0x9c, 0xc1, 0x58, 0xff, 0x7e, 0xcc, 0x0b,
0x2a, 0x55, 0x52, 0x70, 0x54, 0x62, 0xc7, 0x87, 0xb0, 0x76, 0x2c, 0x8c, 0x63, 0xdb, 0x38, 0x16,
0x47, 0x8e, 0x45, 0xdb, 0x71, 0xdf, 0x38, 0x6e, 0x73, 0xed, 0x50, 0xe6, 0xeb, 0x6d, 0xa2, 0xde,
0x04, 0x45, 0x47, 0xa3, 0xd8, 0x80, 0xf0, 0x16, 0xc6, 0xf5, 0x40, 0x31, 0xe5, 0x9b, 0x3d, 0x39,
0x85, 0xbe, 0x3e, 0xa2, 0x33, 0x51, 0xb3, 0x06, 0xc3, 0xf0, 0xa3, 0x07, 0xa0, 0xd1, 0x92, 0xa9,
0xfc, 0xf5, 0xa7, 0x3d, 0x8d, 0x05, 0x0c, 0xf5, 0x5b, 0x94, 0x76, 0xf1, 0x2d, 0xca, 0xf1, 0x5b,
0x84, 0xfc, 0x03, 0x2f, 0x4b, 0x64, 0x86, 0xa9, 0x8d, 0x6d, 0x36, 0x35, 0x89, 0x60, 0xba, 0xc5,
0xab, 0x8c, 0xbf, 0xf2, 0xdf, 0xd0, 0xc5, 0x66, 0xa5, 0x51, 0xe8, 0xa0, 0xc2, 0x2e, 0x26, 0x57,
0xf0, 0xc7, 0xa0, 0x5a, 0xa2, 0x8b, 0x17, 0x1f, 0x07, 0x87, 0x16, 0x07, 0x1d, 0x8b, 0x2f, 0x2e,
0x7e, 0x36, 0x37, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x6e, 0xf4, 0x71, 0xcb, 0x48, 0x03, 0x00,
0x00,
}
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types
import (
"encoding/json"
"io/ioutil"
"time"
"github.com/pkg/errors"
)
//------------------------------------------------------------
// core types for a genesis definition
// GenesisValidator is an initial validator.
type GenesisValidator struct {
PubKey KeyText `json:"pub_key"`
Name string `json:"name"`
}
// GenesisDoc defines the initial conditions for a tendermint blockchain, in particular its validator set.
type GenesisDoc struct {
GenesisTime time.Time `json:"genesis_time"`
ChainID string `json:"chain_id"`
Validators []GenesisValidator `json:"validators"`
AppHash []byte `json:"app_hash"`
AppOptions interface{} `json:"app_options,omitempty"`
}
// SaveAs is a utility method for saving the GenesisDoc as a JSON file.
func (genDoc *GenesisDoc) SaveAs(file string) error {
genDocBytes, err := json.Marshal(genDoc)
if err != nil {
return err
}
return WriteFile(file, genDocBytes, 0644)
}
// ValidatorHash returns the hash of the validator set contained in the GenesisDoc
func (genDoc *GenesisDoc) ValidatorHash() []byte {
vals := make([]*Validator, len(genDoc.Validators))
for i, v := range genDoc.Validators {
if len(v.PubKey.Data) == 0 {
panic(Fmt("ValidatorHash pubkey of validator[%v] in gendoc is empty", i))
}
pubkey, err := PubKeyFromString(v.PubKey.Data)
if err != nil {
panic(Fmt("ValidatorHash PubKeyFromBytes failed:%v", err))
}
vals[i] = NewValidator(pubkey)
}
vset := NewValidatorSet(vals)
return vset.Hash()
}
// ValidateAndComplete checks that all necessary fields are present
// and fills in defaults for optional fields left empty
func (genDoc *GenesisDoc) ValidateAndComplete() error {
if genDoc.ChainID == "" {
return errors.Errorf("Genesis doc must include non-empty chain_id")
}
if len(genDoc.Validators) == 0 {
return errors.Errorf("The genesis file must have at least one validator")
}
if genDoc.GenesisTime.IsZero() {
genDoc.GenesisTime = time.Now()
}
return nil
}
//------------------------------------------------------------
// Make genesis state from file
// GenesisDocFromJSON unmarshalls JSON data into a GenesisDoc.
func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) {
genDoc := GenesisDoc{}
err := json.Unmarshal(jsonBlob, &genDoc)
if err != nil {
return nil, err
}
if err := genDoc.ValidateAndComplete(); err != nil {
return nil, err
}
return &genDoc, err
}
// GenesisDocFromFile reads JSON data from a file and unmarshalls it into a GenesisDoc.
func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) {
jsonBlob, err := ioutil.ReadFile(genDocFile)
if err != nil {
return nil, errors.Wrap(err, "Couldn't read GenesisDoc file")
}
genDoc, err := GenesisDocFromJSON(jsonBlob)
if err != nil {
return nil, errors.Wrap(err, Fmt("Error reading GenesisDoc at %v", genDocFile))
}
return genDoc, nil
}
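A short illustrative helper tying the functions above together; genesis.json is an assumed path, and it presumes the consensus crypto needed by PubKeyFromString has already been initialized.

// loadGenesisValidatorHash is illustrative only: it reads and validates the
// genesis document, then returns the hash of the validator set it declares.
func loadGenesisValidatorHash() ([]byte, error) {
	genDoc, err := GenesisDocFromFile("genesis.json")
	if err != nil {
		return nil, err
	}
	return genDoc.ValidatorHash(), nil
}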
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types
import (
"reflect"
"time"
)
var (
// MsgMap define
MsgMap map[byte]reflect.Type
)
// step and message id define
const (
VoteID = byte(0x06)
VoteReplyID = byte(0x07)
NotifyID = byte(0x08)
PacketTypePing = byte(0xff)
PacketTypePong = byte(0xfe)
)
// InitMessageMap ...
func InitMessageMap() {
MsgMap = map[byte]reflect.Type{
VoteID: reflect.TypeOf(DPosVote{}),
VoteReplyID: reflect.TypeOf(DPosVoteReply{}),
NotifyID: reflect.TypeOf(DPosNotify{}),
}
}
// CanonicalJSONVoteItem ...
type CanonicalJSONVoteItem struct {
VotedNodeIndex int32 `json:"votedNodeIndex,omitempty"`
VotedNodeAddress []byte `json:"votedNodeAddress,omitempty"`
CycleStart int64 `json:"cycleStart,omitempty"`
CycleStop int64 `json:"cycleStop,omitempty"`
PeriodStart int64 `json:"periodStart,omitempty"`
PeriodStop int64 `json:"periodStop,omitempty"`
Height int64 `json:"height,omitempty"`
VoteID []byte `json:"voteID,omitempty"`
}
// CanonicalJSONVote ...
type CanonicalJSONVote struct {
VoteItem *CanonicalJSONVoteItem `json:"vote,omitempty"`
VoteTimestamp int64 `json:"voteTimestamp,omitempty"`
VoterNodeIndex int32 `json:"voterNodeIndex,omitempty"`
VoterNodeAddress []byte `json:"voterNodeAddress,omitempty"`
}
// CanonicalJSONOnceVote ...
type CanonicalJSONOnceVote struct {
ChainID string `json:"chain_id"`
Vote CanonicalJSONVote `json:"vote"`
}
// CanonicalVote ...
func CanonicalVote(vote *Vote) CanonicalJSONVote {
return CanonicalJSONVote{
VoteItem: &CanonicalJSONVoteItem{
VotedNodeIndex: vote.VoteItem.VotedNodeIndex,
VotedNodeAddress: vote.VoteItem.VotedNodeAddress,
CycleStart: vote.VoteItem.CycleStart,
CycleStop: vote.VoteItem.CycleStop,
PeriodStart: vote.VoteItem.PeriodStart,
PeriodStop: vote.VoteItem.PeriodStop,
Height: vote.VoteItem.Height,
VoteID: vote.VoteItem.VoteID,
},
VoteTimestamp: vote.VoteTimestamp,
VoterNodeIndex: vote.VoterNodeIndex,
VoterNodeAddress: vote.VoterNodeAddress,
}
}
// CanonicalJSONNotify ...
type CanonicalJSONNotify struct {
VoteItem *CanonicalJSONVoteItem `json:"vote,omitempty"`
HeightStop int64 `json:"heightStop,omitempty"`
NotifyTimestamp int64 `json:"notifyTimestamp,omitempty"`
}
// CanonicalJSONOnceNotify ...
type CanonicalJSONOnceNotify struct {
ChainID string `json:"chain_id"`
Notify CanonicalJSONNotify `json:"vote"`
}
// CanonicalNotify ...
func CanonicalNotify(notify *Notify) CanonicalJSONNotify {
return CanonicalJSONNotify{
VoteItem: &CanonicalJSONVoteItem{
VotedNodeIndex: notify.Vote.VotedNodeIndex,
VotedNodeAddress: notify.Vote.VotedNodeAddress,
CycleStart: notify.Vote.CycleStart,
CycleStop: notify.Vote.CycleStop,
PeriodStart: notify.Vote.PeriodStart,
PeriodStop: notify.Vote.PeriodStop,
Height: notify.Vote.Height,
VoteID: notify.Vote.VoteID,
},
HeightStop: notify.HeightStop,
NotifyTimestamp: notify.NotifyTimestamp,
}
}
// CanonicalTime ...
func CanonicalTime(t time.Time) string {
// note that sending time over go-wire resets it to
// local time, we need to force UTC here, so the
// signatures match
return t.UTC().Format(timeFormat)
}
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"time"
"github.com/33cn/chain33/common/crypto"
"github.com/33cn/chain33/common/log/log15"
)
// error defines
var (
ErrNotifyInvalidValidatorAddress = errors.New("Invalid validator address for notify")
ErrNotifyInvalidValidatorIndex = errors.New("Invalid validator index for notify")
ErrNotifyInvalidSignature = errors.New("Invalid notify signature")
ErrVoteInvalidValidatorIndex = errors.New("Invalid validator index for vote")
ErrVoteInvalidValidatorAddress = errors.New("Invalid validator address for vote")
ErrVoteInvalidSignature = errors.New("Invalid vote signature")
ErrVoteNil = errors.New("Nil vote")
votelog = log15.New("module", "tendermint-vote")
ConsensusCrypto crypto.Crypto
)
// Signable is an interface for all signable things.
// It typically removes signatures before serializing.
type Signable interface {
WriteSignBytes(chainID string, w io.Writer, n *int, err *error)
}
// SignBytes is a convenience method for getting the bytes to sign of a Signable.
func SignBytes(chainID string, o Signable) []byte {
buf, n, err := new(bytes.Buffer), new(int), new(error)
o.WriteSignBytes(chainID, buf, n, err)
if *err != nil {
PanicCrisis(err)
}
return buf.Bytes()
}
// Vote Represents a vote from validators for consensus.
type Vote struct {
*DPosVote
}
// WriteSignBytes ...
func (vote *Vote) WriteSignBytes(chainID string, w io.Writer, n *int, err *error) {
if *err != nil {
return
}
canonical := CanonicalJSONOnceVote{
chainID,
CanonicalVote(vote),
}
byteVote, e := json.Marshal(&canonical)
if e != nil {
*err = e
votelog.Error("vote WriteSignBytes marshal failed", "err", e)
return
}
number, writeErr := w.Write(byteVote)
*n = number
*err = writeErr
}
// Copy ...
func (vote *Vote) Copy() *Vote {
voteCopy := *vote
return &voteCopy
}
func (vote *Vote) String() string {
if vote == nil {
return "nil-Vote"
}
return fmt.Sprintf("Vote{VotedNodeIndex:%v, VotedNodeAddr:%X,Cycle[%v,%v],Period[%v,%v],StartHeight:%v,VoteId:%X,VoteTimeStamp:%v,VoteNodeIndex:%v,VoteNodeAddr:%X,Sig:%X}",
vote.VoteItem.VotedNodeIndex,
Fingerprint(vote.VoteItem.VotedNodeAddress),
vote.VoteItem.CycleStart,
vote.VoteItem.CycleStop,
vote.VoteItem.PeriodStart,
vote.VoteItem.PeriodStop,
vote.VoteItem.Height,
Fingerprint(vote.VoteItem.VoteID),
CanonicalTime(time.Unix(0, vote.VoteTimestamp)),
vote.VoterNodeIndex,
Fingerprint(vote.VoterNodeAddress),
Fingerprint(vote.Signature),
)
}
// Verify ...
func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error {
addr := GenAddressByPubKey(pubKey)
if !bytes.Equal(addr, vote.VoterNodeAddress) {
return ErrVoteInvalidValidatorAddress
}
sig, err := ConsensusCrypto.SignatureFromBytes(vote.Signature)
if err != nil {
votelog.Error("vote Verify failed", "err", err)
return err
}
if !pubKey.VerifyBytes(SignBytes(chainID, vote), sig) {
return ErrVoteInvalidSignature
}
return nil
}
// Hash ...
func (vote *Vote) Hash() []byte {
if vote == nil {
//votelog.Error("vote hash is nil")
return nil
}
bytes, err := json.Marshal(vote)
if err != nil {
votelog.Error("vote hash marshal failed", "err", err)
return nil
}
return crypto.Ripemd160(bytes)
}
// Notify Represents a notify from validators for consensus.
type Notify struct {
*DPosNotify
}
// WriteSignBytes ...
func (notify *Notify) WriteSignBytes(chainID string, w io.Writer, n *int, err *error) {
if *err != nil {
return
}
canonical := CanonicalJSONOnceNotify{
chainID,
CanonicalNotify(notify),
}
byteVote, e := json.Marshal(&canonical)
if e != nil {
*err = e
votelog.Error("vote WriteSignBytes marshal failed", "err", e)
return
}
number, writeErr := w.Write(byteVote)
*n = number
*err = writeErr
}
// Copy ...
func (notify *Notify) Copy() *Notify {
notifyCopy := *notify
return &notifyCopy
}
func (notify *Notify) String() string {
if notify == nil {
return "nil-notify"
}
return fmt.Sprintf("Notify{VotedNodeIndex:%v, VotedNodeAddr:%X,Cycle[%v,%v],Period[%v,%v],StartHeight:%v,VoteId:%X,NotifyTimeStamp:%v,HeightStop:%v,NotifyNodeIndex:%v,NotifyNodeAddr:%X,Sig:%X}",
notify.Vote.VotedNodeIndex,
Fingerprint(notify.Vote.VotedNodeAddress),
notify.Vote.CycleStart,
notify.Vote.CycleStop,
notify.Vote.PeriodStart,
notify.Vote.PeriodStop,
notify.Vote.Height,
Fingerprint(notify.Vote.VoteID),
CanonicalTime(time.Unix(0, notify.NotifyTimestamp)),
notify.HeightStop,
notify.NotifyNodeIndex,
Fingerprint(notify.NotifyNodeAddress),
Fingerprint(notify.Signature),
)
}
// Verify ...
func (notify *Notify) Verify(chainID string, pubKey crypto.PubKey) error {
addr := GenAddressByPubKey(pubKey)
if !bytes.Equal(addr, notify.NotifyNodeAddress) {
return ErrNotifyInvalidValidatorAddress
}
sig, err := ConsensusCrypto.SignatureFromBytes(notify.Signature)
if err != nil {
votelog.Error("Notify Verify failed", "err", err)
return err
}
if !pubKey.VerifyBytes(SignBytes(chainID, notify), sig) {
return ErrNotifyInvalidSignature
}
return nil
}
// Hash ...
func (notify *Notify) Hash() []byte {
if notify == nil {
//votelog.Error("vote hash is nil")
return nil
}
bytes, err := json.Marshal(notify)
if err != nil {
votelog.Error("vote hash marshal failed", "err", err)
return nil
}
return crypto.Ripemd160(bytes)
}
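An illustrative sketch of the sign-then-verify round trip using the helpers above; how the private key is obtained and the chainID value are assumptions, ConsensusCrypto must already be initialized, and the vote is expected to carry a populated VoteItem.

// signAndVerifyVote is illustrative only: it fills in the voter's address,
// signs the canonical sign-bytes, and checks the resulting signature.
func signAndVerifyVote(chainID string, priv crypto.PrivKey, vote *Vote) error {
	pub := priv.PubKey()
	vote.VoterNodeAddress = GenAddressByPubKey(pub)

	sig := priv.Sign(SignBytes(chainID, vote)) // canonical JSON of the vote
	vote.Signature = sig.Bytes()

	return vote.Verify(chainID, pub) // nil means the address and signature check out
}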
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types
import (
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"strconv"
"sync"
"syscall"
"time"
)
const (
// RFC3339Millis ...
RFC3339Millis = "2006-01-02T15:04:05.000Z" // forced milliseconds
timeFormat = RFC3339Millis
)
var (
randgen *rand.Rand
randMux sync.Mutex
// Fmt ...
Fmt = fmt.Sprintf
)
// Init ...
func Init() {
if randgen == nil {
randMux.Lock()
randgen = rand.New(rand.NewSource(time.Now().UnixNano()))
randMux.Unlock()
}
}
// WriteFile ...
func WriteFile(filePath string, contents []byte, mode os.FileMode) error {
return ioutil.WriteFile(filePath, contents, mode)
}
// WriteFileAtomic ...
func WriteFileAtomic(filePath string, newBytes []byte, mode os.FileMode) error {
dir := filepath.Dir(filePath)
f, err := ioutil.TempFile(dir, "")
if err != nil {
return err
}
_, err = f.Write(newBytes)
if err == nil {
err = f.Sync()
}
if closeErr := f.Close(); err == nil {
err = closeErr
}
if permErr := os.Chmod(f.Name(), mode); err == nil {
err = permErr
}
if err == nil {
err = os.Rename(f.Name(), filePath)
}
// any err should result in full cleanup
if err != nil {
if er := os.Remove(f.Name()); er != nil {
fmt.Printf("WriteFileAtomic Remove failed:%v", er)
}
}
return err
}
// Tempfile ...
func Tempfile(prefix string) (*os.File, string) {
file, err := ioutil.TempFile("", prefix)
if err != nil {
panic(err)
}
return file, file.Name()
}
// Fingerprint ...
func Fingerprint(slice []byte) []byte {
fingerprint := make([]byte, 6)
copy(fingerprint, slice)
return fingerprint
}
// Kill ...
func Kill() error {
p, err := os.FindProcess(os.Getpid())
if err != nil {
return err
}
return p.Signal(syscall.SIGTERM)
}
// Exit ...
func Exit(s string) {
fmt.Println(s)
os.Exit(1)
}
// Parallel ...
func Parallel(tasks ...func()) {
var wg sync.WaitGroup
wg.Add(len(tasks))
for _, task := range tasks {
go func(task func()) {
task()
wg.Done()
}(task)
}
wg.Wait()
}
// Percent represents a percentage in increments of 1/1000th of a percent.
type Percent uint32
// Float ...
func (p Percent) Float() float64 {
return float64(p) * 1e-3
}
func (p Percent) String() string {
var buf [12]byte
b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10)
n := len(b)
b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10)
b[n] = '.'
return string(append(b, '%'))
}
// MinInt ...
func MinInt(a, b int) int {
if a < b {
return a
}
return b
}
// MaxInt ...
func MaxInt(a, b int) int {
if a > b {
return a
}
return b
}
// RandIntn ...
func RandIntn(n int) int {
if n <= 0 {
panic("invalid argument to Intn")
}
if n <= 1<<31-1 {
randMux.Lock()
i32 := randgen.Int31n(int32(n))
randMux.Unlock()
return int(i32)
}
randMux.Lock()
i64 := randgen.Int63n(int64(n))
randMux.Unlock()
return int(i64)
}
// RandUint32 ...
func RandUint32() uint32 {
randMux.Lock()
u32 := randgen.Uint32()
randMux.Unlock()
return u32
}
// RandInt63n ...
func RandInt63n(n int64) int64 {
randMux.Lock()
i64 := randgen.Int63n(n)
randMux.Unlock()
return i64
}
// PanicSanity ...
func PanicSanity(v interface{}) {
panic(Fmt("Panicked on a Sanity Check: %v", v))
}
// PanicCrisis ...
func PanicCrisis(v interface{}) {
panic(Fmt("Panicked on a Crisis: %v", v))
}
// PanicQ ...
func PanicQ(v interface{}) {
panic(Fmt("Panicked questionably: %v", v))
}
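A quick worked example of the Percent formatting above (it counts thousandths of a percent).

// percentExample is illustrative only: Percent stores 1/1000ths of a percent,
// so 12345 formats as "12.345%".
func percentExample() string {
	return Percent(12345).String() // "12.345%"
}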
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types
import (
"bytes"
"sort"
"strings"
"github.com/33cn/chain33/common/crypto"
"github.com/33cn/chain33/common/merkle"
)
// Validator ...
type Validator struct {
Address []byte `json:"address"`
PubKey []byte `json:"pub_key"`
}
// NewValidator ...
func NewValidator(pubKey crypto.PubKey) *Validator {
return &Validator{
Address: GenAddressByPubKey(pubKey),
PubKey: pubKey.Bytes(),
}
}
// Copy Creates a new copy of the validator so we can mutate accum.
// Panics if the validator is nil.
func (v *Validator) Copy() *Validator {
vCopy := *v
return &vCopy
}
func (v *Validator) String() string {
if v == nil {
return "nil-Validator"
}
return Fmt("Validator{%v %v}",
v.Address,
v.PubKey)
}
// Hash computes the unique ID of a validator with a given voting power.
// It excludes the Accum value, which changes with every round.
func (v *Validator) Hash() []byte {
hashBytes := v.Address
hashBytes = append(hashBytes, v.PubKey...)
return crypto.Ripemd160(hashBytes)
}
// ValidatorSet represent a set of *Validator at a given height.
// The validators can be fetched by address or index.
// The index is in order of .Address, so the indices are fixed
// for all rounds of a given blockchain height.
// On the other hand, the .AccumPower of each validator and
// the designated .GetProposer() of a set changes every round,
// upon calling .IncrementAccum().
// NOTE: Not goroutine-safe.
// NOTE: All get/set to validators should copy the value for safety.
// TODO: consider validator Accum overflow
type ValidatorSet struct {
// NOTE: persisted via reflect, must be exported.
Validators []*Validator `json:"validators"`
}
// NewValidatorSet ...
func NewValidatorSet(vals []*Validator) *ValidatorSet {
validators := make([]*Validator, len(vals))
for i, val := range vals {
validators[i] = val.Copy()
}
sort.Sort(ValidatorsByAddress(validators))
vs := &ValidatorSet{
Validators: validators,
}
return vs
}
// Copy ...
func (valSet *ValidatorSet) Copy() *ValidatorSet {
validators := make([]*Validator, len(valSet.Validators))
for i, val := range valSet.Validators {
// NOTE: must copy, since IncrementAccum updates in place.
validators[i] = val.Copy()
}
return &ValidatorSet{
Validators: validators,
}
}
// HasAddress ...
func (valSet *ValidatorSet) HasAddress(address []byte) bool {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
return idx != len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address)
}
// GetByAddress ...
func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
if idx != len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) {
return idx, valSet.Validators[idx].Copy()
}
return -1, nil
}
// GetByIndex returns the validator by index.
// It returns nil values if index < 0 or
// index >= len(ValidatorSet.Validators)
func (valSet *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) {
if index < 0 || index >= len(valSet.Validators) {
return nil, nil
}
val = valSet.Validators[index]
return val.Address, val.Copy()
}
// Size ...
func (valSet *ValidatorSet) Size() int {
return len(valSet.Validators)
}
// Hash ...
func (valSet *ValidatorSet) Hash() []byte {
if len(valSet.Validators) == 0 {
return nil
}
hashables := make([][]byte, len(valSet.Validators))
for i, val := range valSet.Validators {
hashables[i] = val.Hash()
}
return merkle.GetMerkleRoot(hashables)
}
// Add ...
func (valSet *ValidatorSet) Add(val *Validator) (added bool) {
val = val.Copy()
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(val.Address, valSet.Validators[i].Address) <= 0
})
if idx == len(valSet.Validators) {
valSet.Validators = append(valSet.Validators, val)
return true
} else if bytes.Equal(valSet.Validators[idx].Address, val.Address) {
return false
} else {
newValidators := make([]*Validator, len(valSet.Validators)+1)
copy(newValidators[:idx], valSet.Validators[:idx])
newValidators[idx] = val
copy(newValidators[idx+1:], valSet.Validators[idx:])
valSet.Validators = newValidators
return true
}
}
// Update ...
func (valSet *ValidatorSet) Update(val *Validator) (updated bool) {
index, sameVal := valSet.GetByAddress(val.Address)
if sameVal == nil {
return false
}
valSet.Validators[index] = val.Copy()
return true
}
// Remove ...
func (valSet *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
if idx == len(valSet.Validators) || !bytes.Equal(valSet.Validators[idx].Address, address) {
return nil, false
}
removedVal := valSet.Validators[idx]
newValidators := valSet.Validators[:idx]
if idx+1 < len(valSet.Validators) {
newValidators = append(newValidators, valSet.Validators[idx+1:]...)
}
valSet.Validators = newValidators
return removedVal, true
}
// Iterate ...
func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) {
for i, val := range valSet.Validators {
stop := fn(i, val.Copy())
if stop {
break
}
}
}
func (valSet *ValidatorSet) String() string {
return valSet.StringIndented("")
}
// StringIndented ...
func (valSet *ValidatorSet) StringIndented(indent string) string {
if valSet == nil {
return "nil-ValidatorSet"
}
valStrings := []string{}
valSet.Iterate(func(index int, val *Validator) bool {
valStrings = append(valStrings, val.String())
return false
})
return Fmt(`ValidatorSet{
%s Validators:
%s %v
%s}`,
indent,
indent, strings.Join(valStrings, "\n"+indent+" "),
indent)
}
// Implements sort for sorting validators by address.
// ValidatorsByAddress ...
type ValidatorsByAddress []*Validator
func (vs ValidatorsByAddress) Len() int {
return len(vs)
}
func (vs ValidatorsByAddress) Less(i, j int) bool {
return bytes.Compare(vs[i].Address, vs[j].Address) == -1
}
func (vs ValidatorsByAddress) Swap(i, j int) {
it := vs[i]
vs[i] = vs[j]
vs[j] = it
}
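A small illustrative sketch of building and querying a validator set with the types above; the addresses and pubkeys are fabricated placeholders.

// validatorSetExample is illustrative only: it builds a two-validator set,
// adds a third validator, and looks one up by address.
func validatorSetExample() string {
	v1 := &Validator{Address: []byte{0x01}, PubKey: []byte("pub-1")}
	v2 := &Validator{Address: []byte{0x02}, PubKey: []byte("pub-2")}
	valSet := NewValidatorSet([]*Validator{v1, v2})

	added := valSet.Add(&Validator{Address: []byte{0x03}, PubKey: []byte("pub-3")})
	idx, val := valSet.GetByAddress([]byte{0x02})

	return Fmt("size=%v added=%v idx=%v val=%v hash=%X",
		valSet.Size(), added, idx, val, valSet.Hash())
}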
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dpos
import (
"bytes"
"encoding/json"
"fmt"
"math/rand"
ttypes "github.com/33cn/plugin/plugin/consensus/dpos/types"
)
var (
r *rand.Rand
)
// ValidatorMgr ...
type ValidatorMgr struct {
// Immutable
ChainID string
// Validators are persisted to the database separately every time they change,
// so we can query for historical validator sets.
// Note that if s.LastBlockHeight causes a valset change,
// we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1
Validators *ttypes.ValidatorSet
// The latest AppHash we've received from calling abci.Commit()
AppHash []byte
}
// Copy makes a copy of the State for mutating.
func (s ValidatorMgr) Copy() ValidatorMgr {
return ValidatorMgr{
ChainID: s.ChainID,
Validators: s.Validators.Copy(),
AppHash: s.AppHash,
}
}
// Equals returns true if the States are identical.
func (s ValidatorMgr) Equals(s2 ValidatorMgr) bool {
return bytes.Equal(s.Bytes(), s2.Bytes())
}
// Bytes serializes the State using go-wire.
func (s ValidatorMgr) Bytes() []byte {
sbytes, err := json.Marshal(s)
if err != nil {
fmt.Printf("Error reading GenesisDoc: %v", err)
return nil
}
return sbytes
}
// IsEmpty returns true if the State is equal to the empty State.
func (s ValidatorMgr) IsEmpty() bool {
return s.Validators == nil // XXX can't compare to Empty
}
// GetValidators returns the last and current validator sets.
func (s ValidatorMgr) GetValidators() (current *ttypes.ValidatorSet) {
return s.Validators
}
// MakeGenesisValidatorMgr creates validators from ttypes.GenesisDoc.
func MakeGenesisValidatorMgr(genDoc *ttypes.GenesisDoc) (ValidatorMgr, error) {
err := genDoc.ValidateAndComplete()
if err != nil {
return ValidatorMgr{}, fmt.Errorf("Error in genesis file: %v", err)
}
// Make validators slice
validators := make([]*ttypes.Validator, len(genDoc.Validators))
for i, val := range genDoc.Validators {
pubKey, err := ttypes.PubKeyFromString(val.PubKey.Data)
if err != nil {
return ValidatorMgr{}, fmt.Errorf("Error validate[%v] in genesis file: %v", i, err)
}
// Make validator
validators[i] = &ttypes.Validator{
Address: ttypes.GenAddressByPubKey(pubKey),
PubKey: pubKey.Bytes(),
}
}
return ValidatorMgr{
ChainID: genDoc.ChainID,
Validators: ttypes.NewValidatorSet(validators),
AppHash: genDoc.AppHash,
}, nil
}
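An illustrative helper combining the genesis loader with the constructor above; the file path is an assumption.

// newValidatorMgrFromFile is illustrative only: it loads the genesis document
// from disk and derives the initial ValidatorMgr from it.
func newValidatorMgrFromFile(path string) (ValidatorMgr, error) {
	genDoc, err := ttypes.GenesisDocFromFile(path)
	if err != nil {
		return ValidatorMgr{}, err
	}
	return MakeGenesisValidatorMgr(genDoc)
}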
package init
import (
_ "github.com/33cn/plugin/plugin/consensus/dpos" //auto gen
_ "github.com/33cn/plugin/plugin/consensus/para" //auto gen
_ "github.com/33cn/plugin/plugin/consensus/pbft" //auto gen
_ "github.com/33cn/plugin/plugin/consensus/raft" //auto gen
......
@@ -36,7 +36,7 @@ var r *rand.Rand
func createConn(ip string) {
var err error
url := ip + ":8802"
url := ip + ":9802"
fmt.Println("grpc url:", url)
conn, err = grpc.Dial(url, grpc.WithInsecure())
if err != nil {
......
@@ -139,7 +139,7 @@ func Put(ip string, size string, privkey string) {
fmt.Fprintln(os.Stderr, err)
return
}
url := "http://" + ip + ":8801"
url := "http://" + ip + ":9801"
if privkey == "" {
_, priv := genaddress()
privkey = common.ToHex(priv.Bytes())
......