Commit 10ae4b09 authored by pengjun

Merge branch 'master' of https://github.com/jpeng-go/plugin

parents 5296eaaf fa1b1ba6
......@@ -37,9 +37,9 @@ chain33_BlockWait() {
break
fi
count=$((count + 1))
sleep 1
sleep 0.1
done
echo "wait new block $count s, cur height=$expect,old=$cur_height"
echo "wait new block $count/10 s, cur height=$expect,old=$cur_height"
}
chain33_QueryTx() {
......@@ -96,6 +96,7 @@ chain33_SendToAddress() {
[ "$ok" == true ]
hash=$(jq -r ".result.hash" <<<"$resp")
echo "hash"
chain33_QueryTx "$hash" "$MAIN_HTTP"
}
......@@ -164,3 +165,28 @@ chain33_LastBlockhash() {
LAST_BLOCK_HASH=$result
echo -e "######\\n last blockhash is $LAST_BLOCK_HASH \\n######"
}
chain33_applyCoins() {
echo "chain33_getMainChainCoins"
if [ "$#" -lt 3 ]; then
echo "chain33_getMainCoins wrong params"
exit 1
fi
local targetAddr=$1
local count=$2
local ip=$3
if [ "$count" -gt 15000000000 ]; then
echo "chain33_getMainCoins wrong coins count,should less than 150 00000000"
exit 1
fi
local poolAddr="1PcGKYYoLn1PLLJJodc1UpgWGeFAQasAkx"
chain33_SendToAddress "${poolAddr}" "${targetAddr}" "$count" "${ip}"
}
chain33_debug_function() {
set -x
eval "$@"
set +x
}
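# A minimal, hypothetical usage sketch (not part of this commit): assuming MAIN_HTTP
# points at a running JSON-RPC endpoint and the pool address
# 1PcGKYYoLn1PLLJJodc1UpgWGeFAQasAkx has been funded and imported (see
# dapp_test_address below), a dapp test could request 100 coins (100*1e8,
# below the 15000000000 cap) and trace the underlying calls like this:
#   chain33_debug_function chain33_applyCoins "1PUiGcbsccfxW3zuvHXZBJfznziph5miAo" 10000000000 "${MAIN_HTTP}"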
......@@ -10,15 +10,16 @@ function dapp_test_rpc() {
if [ -d dapptest ]; then
cp $DAPP_TEST_COMMON dapptest/
cd dapptest || return
dir=$(find . -maxdepth 1 -type d ! -name dapptest ! -name . | sed 's/^\.\///')
dir=$(find . -maxdepth 1 -type d ! -name dapptest ! -name ticket ! -name . | sed 's/^\.\///' | sort)
echo "dapps list: $dir"
for app in $dir; do
echo "=========== # $app rpc test ============="
./"$app/${RPC_TESTFILE}" "$ip"
echo "=========== # $app rpc end ============="
done
## run the ticket test cases last
./ticket/"${RPC_TESTFILE}" "$ip"
fi
echo "============ # dapp rpc test end ============="
}
#dapp_test_rpc $1
......@@ -80,6 +80,7 @@ function base_init() {
sed -i $sedfix 's/^powLimitBits=.*/powLimitBits="0x1f2fffff"/g' chain33.toml
sed -i $sedfix 's/^targetTimePerBlock=.*/targetTimePerBlock=1/g' chain33.toml
sed -i $sedfix 's/^targetTimespan=.*/targetTimespan=10000000/g' chain33.toml
# p2p
sed -i $sedfix 's/^seeds=.*/seeds=["chain33:13802","chain32:13802","chain31:13802"]/g' chain33.toml
......@@ -103,6 +104,9 @@ function base_init() {
# ticket
sed -i $sedfix 's/^ticketPrice =.*/ticketPrice = 10000/g' chain33.toml
#relay genesis
sed -i $sedfix 's/^genesis="12qyocayNF7.*/genesis="1G5Cjy8LuQex2fuYv3gzb7B8MxAnxLEqt3"/g' chain33.toml
}
function start() {
......@@ -128,13 +132,13 @@ function start() {
${CLI} net info
${CLI} net peer_info
local count=100
local count=1000
while [ $count -gt 0 ]; do
peersCount=$(${CLI} net peer_info | jq '.[] | length')
if [ "${peersCount}" -ge 2 ]; then
break
fi
sleep 5
sleep 1
((count--))
echo "peers error: peersCount=${peersCount}"
done
......@@ -209,10 +213,10 @@ function miner() {
sleep 1
echo "=========== # close auto mining ============="
result=$(${1} wallet auto_mine -f 0 | jq ".isok")
if [ "${result}" = "false" ]; then
exit 1
fi
#result=$(${1} wallet auto_mine -f 0 | jq ".isok")
#if [ "${result}" = "false" ]; then
# exit 1
#fi
}
function block_wait() {
......@@ -229,9 +233,9 @@ function block_wait() {
break
fi
count=$((count + 1))
sleep 1
sleep 0.1
done
echo "wait new block $count s, cur height=$expect,old=$cur_height"
echo "wait new block $count/10 s, cur height=$expect,old=$cur_height"
}
function block_wait2height() {
......@@ -254,9 +258,9 @@ function block_wait2height() {
break
fi
count=$((count + 1))
sleep 1
sleep 0.1
done
echo "wait new block $count s, cur_height=$new_height,expect=$expect"
echo "wait new block $count/10 s, cur_height=$new_height,expect=$expect"
}
function check_docker_status() {
......@@ -377,11 +381,21 @@ function dapp_test_address() {
if [ -z "${result}" ]; then
exit 1
fi
result=$(${1} account import_key -k 9d315182e56fde7fadb94408d360203894e5134216944e858f9b31f70e9ecf40 -l rpctestpooladdr | jq ".label")
echo "${result}"
if [ -z "${result}" ]; then
exit 1
fi
sleep 1
hash=$(${1} send coins transfer -a 1500 -n transfer -t 1PUiGcbsccfxW3zuvHXZBJfznziph5miAo -k 2116459C0EC8ED01AA0EEAE35CAC5C96F94473F7816F114873291217303F6989)
echo "${hash}"
#total allocation for rpc test
hash=$(${1} send coins transfer -a 8000 -n transfer -t 1PcGKYYoLn1PLLJJodc1UpgWGeFAQasAkx -k 2116459C0EC8ED01AA0EEAE35CAC5C96F94473F7816F114873291217303F6989)
echo "${hash}"
block_wait "${1}" 1
}
......@@ -430,7 +444,7 @@ function main() {
dapp_run test "${ip}"
### rpc test ###
#rpc_test "${ip}"
rpc_test "${ip}"
### finish ###
check_docker_container
......
......@@ -80,18 +80,14 @@ minerExecs=["paracross"] #configure the mining executors
[mver.consensus]
fundKeyAddr = "1BQXS6TxaYYG5mADaWij4AxhZZUTpw95a5"
powLimitBits = "0x1f00ffff"
maxTxNumber = 1600
[mver.consensus.paracross]
coinReward = 18
coinDevFund = 12
ticketPrice = 10000
powLimitBits = "0x1f00ffff"
retargetAdjustmentFactor = 4
futureBlockTime = 16
ticketFrozenTime = 5 #5s only for test
ticketWithdrawTime = 10 #10s only for test
ticketMinerWaitTime = 2 #2s only for test
maxTxNumber = 1600 #160
targetTimespan = 2304
targetTimePerBlock = 16
[consensus.sub.para]
#gRPC server address(es) of the main-chain node; multiple addresses are supported for load balancing, e.g. "101.37.227.226:8802,39.97.20.242:8802,47.107.15.126:8802,jiedian2.bityuan.com,cloud.bityuan.com"
......@@ -305,6 +301,9 @@ ForkUnfreezeIDX= 0
[fork.sub.autonomy]
Enable=0
[fork.sub.jsvm]
Enable=0
#For an existing parachain that does not sync data from height 0, set this kvmvccmavl fork to the corresponding parachain height; when syncing from 0, the statehash will differ from the previous mavl one
[fork.sub.store-kvmvccmavl]
ForkKvmvccmavl=0
......
......@@ -90,37 +90,43 @@ minerExecs=["ticket", "autonomy"]
[mver.consensus]
fundKeyAddr = "1BQXS6TxaYYG5mADaWij4AxhZZUTpw95a5"
powLimitBits="0x1f00ffff"
maxTxNumber = 1600 #160
[mver.consensus.ForkChainParamV1]
maxTxNumber = 1500
[mver.consensus.ForkTicketFundAddrV1]
fundKeyAddr = "1Ji3W12KGScCM7C2p8bg635sNkayDM8MGY"
[mver.consensus.ticket]
coinReward = 18
coinDevFund = 12
ticketPrice = 10000
powLimitBits="0x1f00ffff"
retargetAdjustmentFactor = 4
futureBlockTime = 16
ticketFrozenTime = 5 #5s only for test
ticketWithdrawTime = 10 #10s only for test
ticketMinerWaitTime = 2 #2s only for test
maxTxNumber = 1600 #160
targetTimespan = 2304
targetTimespan=2304
targetTimePerBlock=16
[mver.consensus.ForkChainParamV1]
[mver.consensus.ticket.ForkChainParamV1]
futureBlockTime = 15
ticketFrozenTime = 43200
ticketWithdrawTime = 172800
ticketMinerWaitTime = 7200
maxTxNumber = 1500
targetTimespan = 2160
targetTimePerBlock = 15
targetTimespan=2160
targetTimePerBlock=15
[mver.consensus.ForkChainParamV2]
[mver.consensus.ticket.ForkChainParamV2]
coinReward = 5
coinDevFund = 3
targetTimespan = 720
targetTimePerBlock = 5
targetTimespan=720
targetTimePerBlock=5
ticketPrice = 3000
[mver.consensus.ForkTicketFundAddrV1]
fundKeyAddr = "1Ji3W12KGScCM7C2p8bg635sNkayDM8MGY"
[consensus.sub.ticket]
genesisBlockTime=1514533394
......
......@@ -3,7 +3,7 @@ module github.com/33cn/plugin
go 1.12
require (
github.com/33cn/chain33 v0.0.0-20190906093700-93b043f5fce6
github.com/33cn/chain33 v0.0.0-20190916043245-c5f98f6a92f5
github.com/BurntSushi/toml v0.3.1
github.com/NebulousLabs/Sia v1.3.7
github.com/btcsuite/btcd v0.0.0-20181013004428-67e573d211ac
......
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/33cn/chain33 v0.0.0-20190906093700-93b043f5fce6 h1:zj6rCGS9sowochmjveOoxhunvhqpkx1S6abhhhfS7KU=
github.com/33cn/chain33 v0.0.0-20190906093700-93b043f5fce6/go.mod h1:4I8n+Zyf3t0UKM5jjpqJY627Tub62oXkLsdzIv4r6rQ=
github.com/33cn/chain33 v0.0.0-20190916043245-c5f98f6a92f5 h1:Nr0twQPrU5o8JE7jjXlaUNS8qQpREQxLi9IAYSAGAIc=
github.com/33cn/chain33 v0.0.0-20190916043245-c5f98f6a92f5/go.mod h1:4I8n+Zyf3t0UKM5jjpqJY627Tub62oXkLsdzIv4r6rQ=
github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7 h1:PqzgE6kAMi81xWQA2QIVxjWkFHptGgC547vchpUbtFo=
github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
......
Title="local"
[log]
# log level, supports debug(dbug)/info/warn/error(eror)/crit
loglevel = "debug"
logConsoleLevel = "info"
# log file name; may include a directory, all generated log files are placed under it
logFile = "logs/chain33.log"
# maximum size of a single log file (unit: MB)
maxFileSize = 300
# maximum number of historical log files to keep
maxBackups = 100
# maximum retention of historical log messages (unit: days)
maxAge = 28
# whether log file names use local time (otherwise UTC)
localTime = true
# whether historical log files are compressed (gz format)
compress = true
# whether to print the calling source file and line number
callerFile = false
# whether to print the calling function
callerFunction = false
[blockchain]
defCacheSize=512
maxFetchBlockNum=128
timeoutSeconds=5
batchBlockNum=128
driver="leveldb"
dbPath="datadir"
dbCache=64
isStrongConsistency=true
singleMode=true
batchsync=false
enableTxQuickIndex=true
[p2p]
seeds=["127.0.0.1:13802"]
enable=true
isSeed=true
serverStart=true
innerSeedEnable=false
useGithub=false
innerBounds=300
msgCacheSize=10240
driver="leveldb"
dbPath="datadir/addrbook"
dbCache=4
grpcLogFile="grpc33.log"
version=199
verMix=199
verMax=199
[rpc]
jrpcBindAddr="localhost:8801"
grpcBindAddr="localhost:8802"
whitelist=["127.0.0.1"]
jrpcFuncWhitelist=["*"]
grpcFuncWhitelist=["*"]
[mempool]
name="timeline"
poolCacheSize=10240
minTxFee=100000
[consensus]
name="tendermint"
minerstart=false
[mver.consensus]
fundKeyAddr = "1BQXS6TxaYYG5mADaWij4AxhZZUTpw95a5"
coinReward = 18
coinDevFund = 12
ticketPrice = 10000
powLimitBits = "0x1f00ffff"
retargetAdjustmentFactor = 4
futureBlockTime = 16
ticketFrozenTime = 5 #5s only for test
ticketWithdrawTime = 10 #10s only for test
ticketMinerWaitTime = 2 #2s only for test
maxTxNumber = 1600 #160
targetTimespan = 2304
targetTimePerBlock = 16
[mver.consensus.ForkChainParamV1]
maxTxNumber = 10000
targetTimespan = 288 #only for test
targetTimePerBlock = 2
[mver.consensus.ForkChainParamV2]
powLimitBits = "0x1f2fffff"
[consensus.sub.dpos]
genesis="14KEKbYtKKQm4wMthSK9J4La4nAiidGozt"
genesisBlockTime=1514533394
timeoutCheckConnections=1000
timeoutVoting=3000
timeoutWaitNotify=2000
createEmptyBlocks=false
createEmptyBlocksInterval=0
validatorNodes=["127.0.0.1:36656"]
delegateNum=1
blockInterval=2
continueBlockNum=12
isValidator=true
rpcAddr="http://localhost:9801"
#shuffleType=1 uses a fixed block-producing order; 2 shuffles the order using VRF information
shuffleType=2
#whether to update topN: if true, the topN nodes are refreshed periodically according to the settings below; if false, the initially configured nodes are always used and voting results are ignored
whetherUpdateTopN=true
blockNumToUpdateDelegate=200
registTopNHeightLimit=10
updateTopNHeightLimit=20
[store]
name="kvdb"
driver="leveldb"
dbPath="datadir/mavltree"
dbCache=128
[store.sub.kvdb]
enableMavlPrefix=false
enableMVCC=false
[wallet]
minFee=100000
driver="leveldb"
dbPath="wallet"
dbCache=16
signType="secp256k1"
[wallet.sub.ticket]
minerdisable=false
minerwhitelist=["*"]
[exec]
isFree=false
minExecFee=100000
enableStat=false
enableMVCC=false
alias=["token1:token","token2:token","token3:token"]
saveTokenTxList=false
[exec.sub.cert]
# whether to enable certificate verification and signing
enable=false
# path of the crypto files
cryptoPath="authdir/crypto"
# certificate-based signature type, supports "auth_ecdsa", "auth_sm2"
signType="auth_ecdsa"
all:
sh ./create_protobuf.sh
#!/bin/sh
protoc --go_out=plugins=grpc:../types ./*.proto --proto_path=. --proto_path="../types/"
syntax = "proto3";
package types;
//CycleBoundaryInfo cycle boundary information
message CycleBoundaryInfo{
int64 cycle = 1;
int64 stopHeight = 2;
string stopHash = 3;
}
//SuperNode super node information
message SuperNode{
bytes address = 1;
bytes pubKey = 2;
}
//VoteItem vote information
message VoteItem {
int32 votedNodeIndex = 1; //index of the voted node
bytes votedNodeAddress = 2; //address of the voted node
int64 cycle = 3; //cycle sequence number
int64 cycleStart = 4; //cycle start time
int64 cycleStop = 5; //cycle stop time
int64 periodStart = 6; //start time of the new node's block-producing period
int64 periodStop = 7; //stop time of the new node's block-producing period
int64 height = 8; //start height of the new node's block production
bytes voteID = 9; //vote ID
CycleBoundaryInfo lastCBInfo = 10;
int64 shuffleType = 11;
repeated SuperNode validators = 12;
repeated SuperNode vrfValidators = 13;
repeated SuperNode noVrfValidators = 14;
}
//DPosVote a node's vote in DPoS consensus, used to reach agreement.
message DPosVote {
VoteItem voteItem = 1;
int64 voteTimestamp = 2; //time the vote was initiated
int32 voterNodeIndex = 3; //index of the voting node
bytes voterNodeAddress = 4; //address of the voting node
bytes signature = 5; //voter's signature
}
//DPosVoteReply reply to a vote.
message DPosVoteReply {
DPosVote vote = 1;
}
//DPosNotify when a DPoS delegate node's block-producing period ends, it notifies the other nodes to confirm the height and to vote for the next node.
message DPosNotify {
VoteItem vote = 1;
int64 heightStop = 2; //stop height of the new node's block production
bytes hashStop = 3; //stop hash of the new node's block production
int64 notifyTimestamp = 4; //time the notify was initiated
int32 notifyNodeIndex = 5; //index of the notifying node
bytes notifyNodeAddress = 6; //address of the notifying node
bytes signature = 7; //signature of the notifying node
}
//DPosCBInfo cycle boundary registration information.
message DPosCBInfo {
int64 cycle = 1;
int64 stopHeight = 2;
string stopHash = 3;
string pubkey = 4;
string signature = 5;
}
\ No newline at end of file
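As a rough, hypothetical illustration (not part of this commit), the generated Go bindings for these messages behave like ordinary protobuf types; the sketch below round-trips a DPosCBInfo with github.com/golang/protobuf/proto, assuming the generated package is github.com/33cn/plugin/plugin/consensus/dpos/types as imported further below.
package main

import (
	"fmt"

	dty "github.com/33cn/plugin/plugin/consensus/dpos/types"
	"github.com/golang/protobuf/proto"
)

func main() {
	// build a cycle-boundary registration record
	info := &dty.DPosCBInfo{
		Cycle:      100,
		StopHeight: 20000,
		StopHash:   "0xabcdef",
		Pubkey:     "03EF0E1D3112CF571743A3318125EDE2E52A4EB904BCBAA4B1F75020C2846A7EB4",
	}
	// serialize and parse it back
	raw, err := proto.Marshal(info)
	if err != nil {
		panic(err)
	}
	var decoded dty.DPosCBInfo
	if err := proto.Unmarshal(raw, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Cycle, decoded.StopHeight, decoded.StopHash)
}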
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package dpos Uses nacl's secret_box to encrypt a net.Conn.
// It is (meant to be) an implementation of the STS protocol.
// Note we do not (yet) assume that a remote peer's pubkey
// is known ahead of time, and thus we are technically
// still vulnerable to MITM. (TODO!)
// See docs/sts-final.pdf for more info
package dpos
import (
"bytes"
crand "crypto/rand"
"crypto/sha256"
"encoding/binary"
"errors"
"io"
"net"
"time"
"github.com/33cn/chain33/common/crypto"
"github.com/33cn/plugin/plugin/consensus/dpos/types"
"golang.org/x/crypto/nacl/box"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/ripemd160"
)
// 2 + 1024 == 1026 total frame size
const (
dataLenSize = 2 // uint16 to describe the length, is <= dataMaxSize
dataMaxSize = 1024
totalFrameSize = dataMaxSize + dataLenSize
sealedFrameSize = totalFrameSize + secretbox.Overhead
authSigMsgSize = (32) + (64)
) // fixed size (length prefixed) byte arrays
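// Note: secretbox.Overhead is 16 bytes (the Poly1305 MAC), so each sealed frame
// on the wire is 1026+16 = 1042 bytes; authSigMsgSize is a 32-byte public key
// followed by a 64-byte signature, matching how shareAuthSignature splits the buffer.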
// SecretConnection Implements net.Conn
type SecretConnection struct {
conn io.ReadWriteCloser
recvBuffer []byte
recvNonce *[24]byte
sendNonce *[24]byte
remPubKey crypto.PubKey
shrSecret *[32]byte // shared secret
}
// MakeSecretConnection Performs handshake and returns a new authenticated SecretConnection.
// Returns nil if error in handshake.
// Caller should call conn.Close()
// See docs/sts-final.pdf for more information.
func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (*SecretConnection, error) {
locPubKey := locPrivKey.PubKey()
// Generate ephemeral keys for perfect forward secrecy.
locEphPub, locEphPriv := genEphKeys()
// Write local ephemeral pubkey and receive one too.
// NOTE: every 32-byte string is accepted as a Curve25519 public key
// (see DJB's Curve25519 paper: http://cr.yp.to/ecdh/curve25519-20060209.pdf)
remEphPub, err := shareEphPubKey(conn, locEphPub)
if err != nil {
return nil, err
}
// Compute common shared secret.
shrSecret := computeSharedSecret(remEphPub, locEphPriv)
// Sort by lexical order.
loEphPub, hiEphPub := sort32(locEphPub, remEphPub)
// Check if the local ephemeral public key
// was the least, lexicographically sorted.
locIsLeast := bytes.Equal(locEphPub[:], loEphPub[:])
// Generate nonces to use for secretbox.
recvNonce, sendNonce := genNonces(loEphPub, hiEphPub, locIsLeast)
// Generate common challenge to sign.
challenge := genChallenge(loEphPub, hiEphPub)
// Construct SecretConnection.
sc := &SecretConnection{
conn: conn,
recvBuffer: nil,
recvNonce: recvNonce,
sendNonce: sendNonce,
shrSecret: shrSecret,
}
// Sign the challenge bytes for authentication.
locSignature := signChallenge(challenge, locPrivKey)
// Share (in secret) each other's pubkey & challenge signature
authSigMsg, err := shareAuthSignature(sc, locPubKey, locSignature)
if err != nil {
return nil, err
}
remPubKey, remSignature := authSigMsg.Key, authSigMsg.Sig
if !remPubKey.VerifyBytes(challenge[:], remSignature) {
return nil, errors.New("Challenge verification failed")
}
// We've authorized.
sc.remPubKey = remPubKey
return sc, nil
}
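// exampleSecretConn is a minimal, hypothetical sketch (not used by the consensus
// code) of how a caller is expected to use MakeSecretConnection: take an already
// established net.Conn, a local private key from the same crypto system as
// types.SecureConnCrypto, run the handshake, then treat the result as an
// encrypted net.Conn.
func exampleSecretConn(conn net.Conn, locPrivKey crypto.PrivKey) (*SecretConnection, error) {
	sc, err := MakeSecretConnection(conn, locPrivKey)
	if err != nil {
		// close the raw connection if the handshake fails
		conn.Close()
		return nil, err
	}
	// every subsequent Write/Read is sealed with the shared secret
	if _, err := sc.Write([]byte("ping")); err != nil {
		sc.Close()
		return nil, err
	}
	return sc, nil
}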
// RemotePubKey Returns authenticated remote pubkey
func (sc *SecretConnection) RemotePubKey() crypto.PubKey {
return sc.remPubKey
}
// Writes encrypted frames of `sealedFrameSize`
// CONTRACT: data smaller than dataMaxSize is read atomically.
func (sc *SecretConnection) Write(data []byte) (n int, err error) {
for 0 < len(data) {
var frame = make([]byte, totalFrameSize)
var chunk []byte
if dataMaxSize < len(data) {
chunk = data[:dataMaxSize]
data = data[dataMaxSize:]
} else {
chunk = data
data = nil
}
chunkLength := len(chunk)
binary.BigEndian.PutUint16(frame, uint16(chunkLength))
copy(frame[dataLenSize:], chunk)
// encrypt the frame
var sealedFrame = make([]byte, sealedFrameSize)
secretbox.Seal(sealedFrame[:0], frame, sc.sendNonce, sc.shrSecret)
// fmt.Printf("secretbox.Seal(sealed:%X,sendNonce:%X,shrSecret:%X\n", sealedFrame, sc.sendNonce, sc.shrSecret)
incr2Nonce(sc.sendNonce)
// end encryption
_, err := sc.conn.Write(sealedFrame)
if err != nil {
return n, err
}
n += len(chunk)
}
return
}
// CONTRACT: data smaller than dataMaxSize is read atomically.
func (sc *SecretConnection) Read(data []byte) (n int, err error) {
if 0 < len(sc.recvBuffer) {
count := copy(data, sc.recvBuffer)
sc.recvBuffer = sc.recvBuffer[count:]
return
}
sealedFrame := make([]byte, sealedFrameSize)
_, err = io.ReadFull(sc.conn, sealedFrame)
if err != nil {
return
}
// decrypt the frame
var frame = make([]byte, totalFrameSize)
// fmt.Printf("secretbox.Open(sealed:%X,recvNonce:%X,shrSecret:%X\n", sealedFrame, sc.recvNonce, sc.shrSecret)
_, ok := secretbox.Open(frame[:0], sealedFrame, sc.recvNonce, sc.shrSecret)
if !ok {
return n, errors.New("Failed to decrypt SecretConnection")
}
incr2Nonce(sc.recvNonce)
// end decryption
var chunkLength = binary.BigEndian.Uint16(frame) // read the first two bytes
if chunkLength > dataMaxSize {
return 0, errors.New("chunkLength is greater than dataMaxSize")
}
var chunk = frame[dataLenSize : dataLenSize+chunkLength]
n = copy(data, chunk)
sc.recvBuffer = chunk[n:]
return
}
// Close Implements net.Conn
func (sc *SecretConnection) Close() error { return sc.conn.Close() }
// LocalAddr ...
func (sc *SecretConnection) LocalAddr() net.Addr { return sc.conn.(net.Conn).LocalAddr() }
// RemoteAddr ...
func (sc *SecretConnection) RemoteAddr() net.Addr { return sc.conn.(net.Conn).RemoteAddr() }
// SetDeadline ...
func (sc *SecretConnection) SetDeadline(t time.Time) error { return sc.conn.(net.Conn).SetDeadline(t) }
// SetReadDeadline ...
func (sc *SecretConnection) SetReadDeadline(t time.Time) error {
return sc.conn.(net.Conn).SetReadDeadline(t)
}
// SetWriteDeadline ...
func (sc *SecretConnection) SetWriteDeadline(t time.Time) error {
return sc.conn.(net.Conn).SetWriteDeadline(t)
}
func genEphKeys() (ephPub, ephPriv *[32]byte) {
var err error
ephPub, ephPriv, err = box.GenerateKey(crand.Reader)
if err != nil {
types.PanicCrisis("Could not generate ephemeral keypairs")
}
return
}
func shareEphPubKey(conn io.ReadWriter, locEphPub *[32]byte) (remEphPub *[32]byte, err error) {
var err1, err2 error
Parallel(
func() {
_, err1 = conn.Write(locEphPub[:])
},
func() {
remEphPub = new([32]byte)
_, err2 = io.ReadFull(conn, remEphPub[:])
},
)
if err1 != nil {
return nil, err1
}
if err2 != nil {
return nil, err2
}
return remEphPub, nil
}
func computeSharedSecret(remPubKey, locPrivKey *[32]byte) (shrSecret *[32]byte) {
shrSecret = new([32]byte)
box.Precompute(shrSecret, remPubKey, locPrivKey)
return
}
func sort32(foo, bar *[32]byte) (lo, hi *[32]byte) {
if bytes.Compare(foo[:], bar[:]) < 0 {
lo = foo
hi = bar
} else {
lo = bar
hi = foo
}
return
}
func genNonces(loPubKey, hiPubKey *[32]byte, locIsLo bool) (recvNonce, sendNonce *[24]byte) {
nonce1 := hash24(append(loPubKey[:], hiPubKey[:]...))
nonce2 := new([24]byte)
copy(nonce2[:], nonce1[:])
nonce2[len(nonce2)-1] ^= 0x01
if locIsLo {
recvNonce = nonce1
sendNonce = nonce2
} else {
recvNonce = nonce2
sendNonce = nonce1
}
return
}
func genChallenge(loPubKey, hiPubKey *[32]byte) (challenge *[32]byte) {
return hash32(append(loPubKey[:], hiPubKey[:]...))
}
func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKey) (signature crypto.Signature) {
signature = locPrivKey.Sign(challenge[:])
return
}
type authSigMessage struct {
Key crypto.PubKey
Sig crypto.Signature
}
func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature crypto.Signature) (*authSigMessage, error) {
var recvMsg authSigMessage
var err1, err2 error
Parallel(
func() {
msgByte := make([]byte, len(pubKey.Bytes())+len(signature.Bytes()))
copy(msgByte, pubKey.Bytes())
copy(msgByte[len(pubKey.Bytes()):], signature.Bytes())
_, err1 = sc.Write(msgByte)
},
func() {
readBuffer := make([]byte, authSigMsgSize)
_, err2 = io.ReadFull(sc, readBuffer)
if err2 != nil {
return
}
//n := int(0) // not used.
//recvMsg = wire.ReadBinary(authSigMessage{}, bytes.NewBuffer(readBuffer), authSigMsgSize, &n, &err2).(authSigMessage)
//secret.Info("shareAuthSignature", "readBuffer", readBuffer)
recvMsg.Key, err2 = types.SecureConnCrypto.PubKeyFromBytes(readBuffer[:32])
if err2 != nil {
return
}
recvMsg.Sig, err2 = types.SecureConnCrypto.SignatureFromBytes(readBuffer[32:])
if err2 != nil {
return
}
})
if err1 != nil {
return nil, err1
}
if err2 != nil {
return nil, err2
}
return &recvMsg, nil
}
//--------------------------------------------------------------------------------
// sha256
func hash32(input []byte) (res *[32]byte) {
hasher := sha256.New()
_, err := hasher.Write(input) // nolint: errcheck, gas
if err != nil {
panic(err)
}
resSlice := hasher.Sum(nil)
res = new([32]byte)
copy(res[:], resSlice)
return
}
// We only fill in the first 20 bytes with ripemd160
func hash24(input []byte) (res *[24]byte) {
hasher := ripemd160.New()
_, err := hasher.Write(input) // nolint: errcheck, gas
if err != nil {
panic(err)
}
resSlice := hasher.Sum(nil)
res = new([24]byte)
copy(res[:], resSlice)
return
}
// increment nonce big-endian by 2 with wraparound.
func incr2Nonce(nonce *[24]byte) {
incrNonce(nonce)
incrNonce(nonce)
}
// increment nonce big-endian by 1 with wraparound.
func incrNonce(nonce *[24]byte) {
for i := 23; 0 <= i; i-- {
nonce[i]++
if nonce[i] != 0 {
return
}
}
}
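// For illustration: incrNonce treats the 24 bytes as a big-endian counter, so a
// nonce ending in ...00ff becomes ...0100 and the all-0xff nonce wraps to all
// zeroes. Because genNonces starts the two directions with last bytes differing
// by one and incr2Nonce always advances by two, the send and receive nonce
// sequences never collide.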
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dpos
import (
"fmt"
"testing"
"time"
_ "github.com/33cn/chain33/system"
_ "github.com/33cn/plugin/plugin/dapp/init"
_ "github.com/33cn/plugin/plugin/store/init"
"github.com/stretchr/testify/assert"
)
func init() {
setParams(3, 3, 6)
}
func setParams(delegateNum int64, blockInterval int64, continueBlockNum int64) {
dposDelegateNum = delegateNum //number of delegate nodes, read from config; later it can be determined by voting results
dposBlockInterval = blockInterval //block interval, currently 3s
dposContinueBlockNum = continueBlockNum //number of consecutive blocks a delegate produces once elected
dposCycle = dposDelegateNum * dposBlockInterval * dposContinueBlockNum
dposPeriod = dposBlockInterval * dposContinueBlockNum
}
func printTask(now int64, task *Task) {
fmt.Printf("now:%v|cycleStart:%v|cycleStop:%v|periodStart:%v|periodStop:%v|blockStart:%v|blockStop:%v|nodeId:%v\n",
now,
task.CycleStart,
task.CycleStop,
task.PeriodStart,
task.PeriodStop,
task.BlockStart,
task.BlockStop,
task.NodeID)
}
func assertTask(task *Task, t *testing.T) {
assert.Equal(t, true, task.NodeID >= 0 && task.NodeID < dposDelegateNum)
assert.Equal(t, true, task.CycleStart <= task.PeriodStart && task.PeriodStart <= task.BlockStart && task.BlockStop <= task.PeriodStop && task.PeriodStop <= task.CycleStop)
}
func TestDecideTaskByTime(t *testing.T) {
now := time.Now().Unix()
task := DecideTaskByTime(now)
printTask(now, &task)
assertTask(&task, t)
setParams(2, 1, 6)
now = time.Now().Unix()
task = DecideTaskByTime(now)
printTask(now, &task)
assertTask(&task, t)
setParams(21, 1, 12)
now = time.Now().Unix()
task = DecideTaskByTime(now)
printTask(now, &task)
assertTask(&task, t)
setParams(21, 2, 12)
now = time.Now().Unix()
task = DecideTaskByTime(now)
printTask(now, &task)
assertTask(&task, t)
setParams(2, 3, 12)
/*
for i := 0; i < 120; i++ {
now = time.Now().Unix()
task = DecideTaskByTime(now)
printTask(now, &task)
assertTask(&task, t)
time.Sleep(time.Second * 1)
}
*/
}
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dpos
import (
"time"
)
var (
tickTockBufferSize = 10
)
// TimeoutTicker is a timer that schedules timeouts
// conditional on the height/round/step in the timeoutInfo.
// The timeoutInfo.Duration may be non-positive.
type TimeoutTicker interface {
Start()
Stop()
Chan() <-chan timeoutInfo // on which to receive a timeout
ScheduleTimeout(ti timeoutInfo) // reset the timer
}
// timeoutTicker wraps time.Timer,
// scheduling timeouts only for greater height/round/step
// than what it's already seen.
// Timeouts are scheduled along the tickChan,
// and fired on the tockChan.
type timeoutTicker struct {
timer *time.Timer
tickChan chan timeoutInfo // for scheduling timeouts
tockChan chan timeoutInfo // for notifying about them
}
// NewTimeoutTicker returns a new TimeoutTicker.
func NewTimeoutTicker() TimeoutTicker {
tt := &timeoutTicker{
timer: time.NewTimer(0),
tickChan: make(chan timeoutInfo, tickTockBufferSize),
tockChan: make(chan timeoutInfo, tickTockBufferSize),
}
tt.stopTimer() // don't want to fire until the first scheduled timeout
return tt
}
// OnStart implements cmn.Service. It starts the timeout routine.
func (t *timeoutTicker) Start() {
go t.timeoutRoutine()
}
// OnStop implements cmn.Service. It stops the timeout routine.
func (t *timeoutTicker) Stop() {
t.stopTimer()
}
// Chan returns a channel on which timeouts are sent.
func (t *timeoutTicker) Chan() <-chan timeoutInfo {
return t.tockChan
}
// ScheduleTimeout schedules a new timeout by sending on the internal tickChan.
// The timeoutRoutine is always available to read from tickChan, so this won't block.
// The scheduling may fail if the timeoutRoutine has already scheduled a timeout for a later height/round/step.
func (t *timeoutTicker) ScheduleTimeout(ti timeoutInfo) {
t.tickChan <- ti
}
//-------------------------------------------------------------
// stop the timer and drain if necessary
func (t *timeoutTicker) stopTimer() {
// Stop() returns false if it was already fired or was stopped
if !t.timer.Stop() {
select {
case <-t.timer.C:
default:
dposlog.Debug("Timer already stopped")
}
}
}
// send on tickChan to start a new timer.
// timers are interrupted and replaced by new ticks from later steps
// timeouts of 0 on the tickChan will be immediately relayed to the tockChan
func (t *timeoutTicker) timeoutRoutine() {
dposlog.Debug("Starting timeout routine")
var ti timeoutInfo
for {
select {
case newti := <-t.tickChan:
dposlog.Debug("Received tick", "old_ti", ti, "new_ti", newti)
// stop the last timer
t.stopTimer()
// update timeoutInfo and reset timer
// NOTE time.Timer allows duration to be non-positive
ti = newti
t.timer.Reset(ti.Duration)
dposlog.Debug("Scheduled timeout", "dur", ti.Duration)
case <-t.timer.C:
dposlog.Info("Timed out", "dur", ti.Duration, "state", StateTypeMapping[ti.State])
// go routine here guarantees timeoutRoutine doesn't block.
// Determinism comes from playback in the receiveRoutine.
// We can eliminate it by merging the timeoutRoutine into receiveRoutine
// and managing the timeouts ourselves with a millisecond ticker
go func(toi timeoutInfo) { t.tockChan <- toi }(ti)
}
}
}
package dpos
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestTicker(t *testing.T) {
ticker := NewTimeoutTicker()
ticker.Start()
ti := timeoutInfo{
Duration: time.Second * time.Duration(3),
State: InitStateType,
}
fmt.Println("timeoutInfo:", ti.String())
now := time.Now().Unix()
ticker.ScheduleTimeout(ti)
<-ticker.Chan()
end := time.Now().Unix()
ticker.Stop()
assert.True(t, end-now >= 2)
fmt.Println("TestTicker ok")
}
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types
import (
"encoding/json"
"io/ioutil"
"time"
"github.com/pkg/errors"
)
//------------------------------------------------------------
// core types for a genesis definition
// GenesisValidator is an initial validator.
type GenesisValidator struct {
PubKey KeyText `json:"pub_key"`
Name string `json:"name"`
}
// GenesisDoc defines the initial conditions for a tendermint blockchain, in particular its validator set.
type GenesisDoc struct {
GenesisTime time.Time `json:"genesis_time"`
ChainID string `json:"chain_id"`
Validators []GenesisValidator `json:"validators"`
AppHash []byte `json:"app_hash"`
AppOptions interface{} `json:"app_options,omitempty"`
}
// SaveAs is a utility method for saving GenesisDoc as a JSON file.
func (genDoc *GenesisDoc) SaveAs(file string) error {
genDocBytes, err := json.Marshal(genDoc)
if err != nil {
return err
}
return WriteFile(file, genDocBytes, 0644)
}
// ValidatorHash returns the hash of the validator set contained in the GenesisDoc
func (genDoc *GenesisDoc) ValidatorHash() []byte {
vals := make([]*Validator, len(genDoc.Validators))
for i, v := range genDoc.Validators {
if len(v.PubKey.Data) == 0 {
panic(Fmt("ValidatorHash pubkey of validator[%v] in gendoc is empty", i))
}
pubkey, err := PubKeyFromString(v.PubKey.Data)
if err != nil {
panic(Fmt("ValidatorHash PubKeyFromBytes failed:%v", err))
}
vals[i] = NewValidator(pubkey)
}
vset := NewValidatorSet(vals)
return vset.Hash()
}
// ValidateAndComplete checks that all necessary fields are present
// and fills in defaults for optional fields left empty
func (genDoc *GenesisDoc) ValidateAndComplete() error {
if genDoc.ChainID == "" {
return errors.Errorf("Genesis doc must include non-empty chain_id")
}
if len(genDoc.Validators) == 0 {
return errors.Errorf("The genesis file must have at least one validator")
}
if genDoc.GenesisTime.IsZero() {
genDoc.GenesisTime = time.Now()
}
return nil
}
//------------------------------------------------------------
// Make genesis state from file
// GenesisDocFromJSON unmarshalls JSON data into a GenesisDoc.
func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) {
genDoc := GenesisDoc{}
err := json.Unmarshal(jsonBlob, &genDoc)
if err != nil {
return nil, err
}
if err := genDoc.ValidateAndComplete(); err != nil {
return nil, err
}
return &genDoc, err
}
// GenesisDocFromFile reads JSON data from a file and unmarshalls it into a GenesisDoc.
func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) {
jsonBlob, err := ioutil.ReadFile(genDocFile)
if err != nil {
return nil, errors.Wrap(err, "Couldn't read GenesisDoc file")
}
genDoc, err := GenesisDocFromJSON(jsonBlob)
if err != nil {
return nil, errors.Wrap(err, Fmt("Error reading GenesisDoc at %v", genDocFile))
}
return genDoc, nil
}
package types
import (
"io/ioutil"
"os"
"testing"
"github.com/33cn/chain33/common/crypto"
"github.com/33cn/chain33/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
errGenesisFile = `{"genesis_time:"2018-08-16T15:38:56.951569432+08:00","chain_id":"chain33-Z2cgFX","validators":[{"pub_key":{"type":"secp256k1","data":"03EF0E1D3112CF571743A3318125EDE2E52A4EB904BCBAA4B1F75020C2846A7EB4"},"name":""},{"pub_key":{"type":"secp256k1","data":"027848E7FA630B759DB406940B5506B666A344B1060794BBF314EB459D40881BB3"},"name":""},{"pub_key":{"type":"secp256k1","data":"03F4AB6659E61E8512C9A24AC385CC1AC4D52B87D10ADBDF060086EA82BE62CDDE"},"name":""}],"app_hash":null}`
genesisFile = `{"genesis_time":"2018-08-16T15:38:56.951569432+08:00","chain_id":"chain33-Z2cgFX","validators":[{"pub_key":{"type":"secp256k1","data":"03EF0E1D3112CF571743A3318125EDE2E52A4EB904BCBAA4B1F75020C2846A7EB4"},"name":""},{"pub_key":{"type":"secp256k1","data":"027848E7FA630B759DB406940B5506B666A344B1060794BBF314EB459D40881BB3"},"name":""},{"pub_key":{"type":"secp256k1","data":"03F4AB6659E61E8512C9A24AC385CC1AC4D52B87D10ADBDF060086EA82BE62CDDE"},"name":""}],"app_hash":null}`
)
func init() {
//to use VRF, SECP256K1 public/private keys are required
cr, err := crypto.New(types.GetSignName("", types.SECP256K1))
if err != nil {
panic("init ConsensusCrypto failed.")
}
ConsensusCrypto = cr
}
func TestGenesisDocFromFile(t *testing.T) {
os.Remove("./genesis.json")
os.Remove("../genesis.json")
ioutil.WriteFile("genesis.json", []byte(genesisFile), 0664)
genDoc, err := GenesisDocFromFile("../genesis.json")
require.NotNil(t, err)
require.Nil(t, genDoc)
genDoc, err = GenesisDocFromFile("./genesis.json")
require.NotNil(t, genDoc)
require.Nil(t, err)
os.Remove("./genesis.json")
}
func TestGenesisDocFromJSON(t *testing.T) {
genDoc, err := GenesisDocFromJSON([]byte(genesisFile))
require.NotNil(t, genDoc)
require.Nil(t, err)
assert.True(t, genDoc.ChainID == "chain33-Z2cgFX")
assert.True(t, genDoc.AppHash == nil)
assert.True(t, len(genDoc.Validators) == 3)
genDoc, err = GenesisDocFromJSON([]byte(errGenesisFile))
require.NotNil(t, err)
require.Nil(t, genDoc)
}
func TestSaveAs(t *testing.T) {
genDoc, err := GenesisDocFromJSON([]byte(genesisFile))
require.NotNil(t, genDoc)
require.Nil(t, err)
assert.True(t, genDoc.ChainID == "chain33-Z2cgFX")
assert.True(t, genDoc.AppHash == nil)
assert.True(t, len(genDoc.Validators) == 3)
err = genDoc.SaveAs("./tmp_genesis.json")
require.Nil(t, err)
genDoc2, err := GenesisDocFromFile("./tmp_genesis.json")
require.NotNil(t, genDoc2)
require.Nil(t, err)
//assert.True(t, genDoc.ChainID == genDoc2.ChainID)
//assert.True(t, genDoc.GenesisTime == genDoc2.GenesisTime)
//assert.True(t, bytes.Equal(genDoc.AppHash, genDoc2.AppHash))
assert.True(t, genDoc.Validators[0].Name == genDoc2.Validators[0].Name)
assert.True(t, genDoc.Validators[0].PubKey.Data == genDoc2.Validators[0].PubKey.Data)
assert.True(t, genDoc.Validators[0].PubKey.Kind == genDoc2.Validators[0].PubKey.Kind)
assert.True(t, genDoc.Validators[1].Name == genDoc2.Validators[1].Name)
assert.True(t, genDoc.Validators[1].PubKey.Data == genDoc2.Validators[1].PubKey.Data)
assert.True(t, genDoc.Validators[1].PubKey.Kind == genDoc2.Validators[1].PubKey.Kind)
assert.True(t, genDoc.Validators[2].Name == genDoc2.Validators[2].Name)
assert.True(t, genDoc.Validators[2].PubKey.Data == genDoc2.Validators[2].PubKey.Data)
assert.True(t, genDoc.Validators[2].PubKey.Kind == genDoc2.Validators[2].PubKey.Kind)
err = os.Remove("./tmp_genesis.json")
require.Nil(t, err)
}
func TestValidateAndComplete(t *testing.T) {
genDoc, err := GenesisDocFromJSON([]byte(genesisFile))
require.NotNil(t, genDoc)
require.Nil(t, err)
tt := genDoc.GenesisTime
setSize := len(genDoc.Validators)
err = genDoc.ValidateAndComplete()
assert.True(t, tt == genDoc.GenesisTime)
require.Nil(t, err)
assert.True(t, setSize == len(genDoc.Validators))
vals := genDoc.Validators
genDoc.Validators = nil
err = genDoc.ValidateAndComplete()
require.NotNil(t, err)
genDoc.Validators = vals
genDoc.ChainID = ""
err = genDoc.ValidateAndComplete()
require.NotNil(t, err)
}
func TestValidatorHash(t *testing.T) {
genDoc, err := GenesisDocFromJSON([]byte(genesisFile))
require.NotNil(t, genDoc)
require.Nil(t, err)
hash := genDoc.ValidatorHash()
assert.True(t, len(hash) > 0)
}
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types
import (
"reflect"
"time"
)
var (
// MsgMap define
MsgMap map[byte]reflect.Type
)
// step and message id define
const (
VoteID = byte(0x06)
VoteReplyID = byte(0x07)
NotifyID = byte(0x08)
CBInfoID = byte(0x09)
PacketTypePing = byte(0xff)
PacketTypePong = byte(0xfe)
)
// InitMessageMap ...
func InitMessageMap() {
MsgMap = map[byte]reflect.Type{
VoteID: reflect.TypeOf(DPosVote{}),
VoteReplyID: reflect.TypeOf(DPosVoteReply{}),
NotifyID: reflect.TypeOf(DPosNotify{}),
CBInfoID: reflect.TypeOf(DPosCBInfo{}),
}
}
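// Illustrative only: after InitMessageMap(), a received packet id can be mapped
// back to a concrete message type, e.g.
//   msg := reflect.New(MsgMap[VoteID]).Interface() // yields a *DPosVote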
// CanonicalJSONVoteItem ...
type CanonicalJSONVoteItem struct {
VotedNodeIndex int32 `json:"votedNodeIndex,omitempty"`
VotedNodeAddress []byte `json:"votedNodeAddress,omitempty"`
CycleStart int64 `json:"cycleStart,omitempty"`
CycleStop int64 `json:"cycleStop,omitempty"`
PeriodStart int64 `json:"periodStart,omitempty"`
PeriodStop int64 `json:"periodStop,omitempty"`
Height int64 `json:"height,omitempty"`
VoteID []byte `json:"voteID,omitempty"`
}
// CanonicalJSONVote ...
type CanonicalJSONVote struct {
VoteItem *CanonicalJSONVoteItem `json:"vote,omitempty"`
VoteTimestamp int64 `json:"voteTimestamp,omitempty"`
VoterNodeIndex int32 `json:"voterNodeIndex,omitempty"`
VoterNodeAddress []byte `json:"voterNodeAddress,omitempty"`
}
// CanonicalJSONOnceVote ...
type CanonicalJSONOnceVote struct {
ChainID string `json:"chain_id"`
Vote CanonicalJSONVote `json:"vote"`
}
// CanonicalVote ...
func CanonicalVote(vote *Vote) CanonicalJSONVote {
return CanonicalJSONVote{
VoteItem: &CanonicalJSONVoteItem{
VotedNodeIndex: vote.VoteItem.VotedNodeIndex,
VotedNodeAddress: vote.VoteItem.VotedNodeAddress,
CycleStart: vote.VoteItem.CycleStart,
CycleStop: vote.VoteItem.CycleStop,
PeriodStart: vote.VoteItem.PeriodStart,
PeriodStop: vote.VoteItem.PeriodStop,
Height: vote.VoteItem.Height,
VoteID: vote.VoteItem.VoteID,
},
VoteTimestamp: vote.VoteTimestamp,
VoterNodeIndex: vote.VoterNodeIndex,
VoterNodeAddress: vote.VoterNodeAddress,
}
}
// CanonicalJSONNotify ...
type CanonicalJSONNotify struct {
VoteItem *CanonicalJSONVoteItem `json:"vote,omitempty"`
HeightStop int64 `json:"heightStop,omitempty"`
NotifyTimestamp int64 `json:"notifyTimestamp,omitempty"`
}
// CanonicalJSONOnceNotify ...
type CanonicalJSONOnceNotify struct {
ChainID string `json:"chain_id"`
Notify CanonicalJSONNotify `json:"vote"`
}
// CanonicalNotify ...
func CanonicalNotify(notify *Notify) CanonicalJSONNotify {
return CanonicalJSONNotify{
VoteItem: &CanonicalJSONVoteItem{
VotedNodeIndex: notify.Vote.VotedNodeIndex,
VotedNodeAddress: notify.Vote.VotedNodeAddress,
CycleStart: notify.Vote.CycleStart,
CycleStop: notify.Vote.CycleStop,
PeriodStart: notify.Vote.PeriodStart,
PeriodStop: notify.Vote.PeriodStop,
Height: notify.Vote.Height,
VoteID: notify.Vote.VoteID,
},
HeightStop: notify.HeightStop,
NotifyTimestamp: notify.NotifyTimestamp,
}
}
// CanonicalTime ...
func CanonicalTime(t time.Time) string {
// note that sending time over go-wire resets it to
// local time, we need to force UTC here, so the
// signatures match
return t.UTC().Format(timeFormat)
}
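// For illustration: CanonicalTime(time.Unix(0, 0)) returns "1970-01-01T00:00:00.000Z".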
package types
import (
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"os"
"sort"
"strings"
"testing"
"time"
"github.com/33cn/chain33/common/crypto"
"github.com/33cn/chain33/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
privValidatorFile = `{"address":"2FA286246F0222C4FF93210E91AECE0C66723F15","pub_key":{"type":"secp256k1","data":"03EF0E1D3112CF571743A3318125EDE2E52A4EB904BCBAA4B1F75020C2846A7EB4"},"last_height":1679,"last_round":0,"last_step":3,"last_signature":{"type":"secp256k1","data":"37892A916D6E487ADF90F9E88FE37024597677B6C6FED47444AD582F74144B3D6E4B364EAF16AF03A4E42827B6D3C86415D734A5A6CCA92E114B23EB9265AF09"},"last_signbytes":"7B22636861696E5F6964223A22636861696E33332D5A326367466A222C22766F7465223A7B22626C6F636B5F6964223A7B2268617368223A224F6A657975396B2B4149426A6E4859456739584765356A7A462B673D222C227061727473223A7B2268617368223A6E756C6C2C22746F74616C223A307D7D2C22686569676874223A313637392C22726F756E64223A302C2274696D657374616D70223A22323031382D30382D33315430373A35313A34332E3935395A222C2274797065223A327D7D","priv_key":{"type":"secp256k1","data":"5A6A14DA6F5A42835E529D75D87CC8904544F59EEE5387A37D87EEAD194D7EB2"}}`
strAddr = "2FA286246F0222C4FF93210E91AECE0C66723F15"
strPubkey = "03EF0E1D3112CF571743A3318125EDE2E52A4EB904BCBAA4B1F75020C2846A7EB4"
addr1 = "79F9608B6826762CACCA843E81AE86837ABFFB21"
addr2 = "3480088E35099CBA75958DAE7A364A8AAD2C1BD0"
addr3 = "9FF8678DBDA4EAE2F999CBFBCBD8F5F3FC47FBAE"
addr4 = "70A51AD9777EF1F97250F7E4C156D8637BC7143C"
)
func init() {
//to use VRF, SECP256K1 public/private keys are required
cr, err := crypto.New(types.GetSignName("", types.SECP256K1))
if err != nil {
panic("init ConsensusCrypto failed.")
}
ConsensusCrypto = cr
}
func save(filename, filecontent string) {
f, err := os.Create(filename)
if err != nil {
fmt.Println("err = ", err)
return
}
defer f.Close()
n, err := f.WriteString(filecontent)
if err != nil {
fmt.Println("err = ", err)
return
}
fmt.Println("n=", n, " contentlen=", len(filecontent))
}
func remove(filename string) {
os.Remove(filename)
}
func read(filename string) bool {
f, err := os.Open(filename)
if err != nil {
fmt.Println("err=", err)
return false
}
defer f.Close()
buf := make([]byte, 1024*2)
_, err1 := f.Read(buf)
if err1 != nil && err1 != io.EOF {
fmt.Println("err1=", err1)
return false
}
//fmt.Println("buf=",string(buf[:n]))
return true
}
func TestLoadOrGenPrivValidatorFS(t *testing.T) {
filename := "./tmp_priv_validator.json"
save(filename, privValidatorFile)
privValidator := LoadOrGenPrivValidatorFS(filename)
require.NotNil(t, privValidator)
assert.True(t, strings.EqualFold(strAddr, hex.EncodeToString(privValidator.GetAddress())))
assert.True(t, strings.EqualFold(strPubkey, hex.EncodeToString(privValidator.GetPubKey().Bytes())))
fmt.Println(privValidator.String())
remove(filename)
}
func TestGenPrivValidatorImp(t *testing.T) {
filename := "tmp_priv_validator2.json"
//save(filename, privValidatorFile)
privValidator := LoadOrGenPrivValidatorFS(filename)
require.NotNil(t, privValidator)
assert.True(t, true == read(filename))
remove(filename)
assert.True(t, false == read(filename))
privValidator.Reset()
assert.True(t, true == read(filename))
assert.True(t, len(privValidator.GetPubKey().Bytes()) > 0)
assert.True(t, len(privValidator.GetAddress()) > 0)
remove(filename)
}
func TestPrivValidatorImpSort(t *testing.T) {
var arr []*PrivValidatorImp
Addr1, _ := hex.DecodeString(addr1)
Addr2, _ := hex.DecodeString(addr2)
Addr3, _ := hex.DecodeString(addr3)
Addr4, _ := hex.DecodeString(addr4)
imp1 := &PrivValidatorImp{
Address: Addr1,
}
arr = append(arr, imp1)
imp2 := &PrivValidatorImp{
Address: Addr2,
}
arr = append(arr, imp2)
imp3 := &PrivValidatorImp{
Address: Addr3,
}
arr = append(arr, imp3)
imp4 := &PrivValidatorImp{
Address: Addr4,
}
arr = append(arr, imp4)
sort.Sort(PrivValidatorsByAddress(arr))
assert.True(t, strings.EqualFold(addr2, hex.EncodeToString(arr[0].Address)))
assert.True(t, strings.EqualFold(addr4, hex.EncodeToString(arr[1].Address)))
assert.True(t, strings.EqualFold(addr1, hex.EncodeToString(arr[2].Address)))
assert.True(t, strings.EqualFold(addr3, hex.EncodeToString(arr[3].Address)))
}
func TestSignAndVerifyVote(t *testing.T) {
filename := "./tmp_priv_validator.json"
save(filename, privValidatorFile)
privValidator := LoadOrGenPrivValidatorFS(filename)
now := time.Now().Unix()
//task := dpos.DecideTaskByTime(now)
//generate a vote and sign it
voteItem := &VoteItem{
VotedNodeAddress: privValidator.Address,
VotedNodeIndex: int32(0),
Cycle: 100,
CycleStart: 18888,
CycleStop: 28888,
PeriodStart: 20000,
PeriodStop: 21000,
Height: 100,
}
encode, err := json.Marshal(voteItem)
if err != nil {
panic("Marshal vote failed.")
}
voteItem.VoteID = crypto.Ripemd160(encode)
vote := &Vote{
DPosVote: &DPosVote{
VoteItem: voteItem,
VoteTimestamp: now,
VoterNodeAddress: privValidator.GetAddress(),
VoterNodeIndex: int32(0),
},
}
assert.True(t, 0 == len(vote.Signature))
chainID := "test-chain-Ep9EcD"
privValidator.SignVote(chainID, vote)
assert.True(t, 0 < len(vote.Signature))
vote2 := vote.Copy()
err = vote2.Verify(chainID, privValidator.PubKey)
require.Nil(t, err)
remove(filename)
privValidator2 := LoadOrGenPrivValidatorFS(filename)
require.NotNil(t, privValidator2)
err = vote2.Verify(chainID, privValidator2.PubKey)
require.NotNil(t, err)
remove(filename)
}
func TestSignAndVerifyNotify(t *testing.T) {
filename := "./tmp_priv_validator.json"
save(filename, privValidatorFile)
privValidator := LoadOrGenPrivValidatorFS(filename)
now := time.Now().Unix()
//task := dpos.DecideTaskByTime(now)
//generate a vote and sign it
voteItem := &VoteItem{
VotedNodeAddress: privValidator.Address,
VotedNodeIndex: int32(0),
Cycle: 100,
CycleStart: 18888,
CycleStop: 28888,
PeriodStart: 20000,
PeriodStop: 21000,
Height: 100,
}
encode, err := json.Marshal(voteItem)
if err != nil {
panic("Marshal vote failed.")
}
voteItem.VoteID = crypto.Ripemd160(encode)
chainID := "test-chain-Ep9EcD"
notify := &Notify{
DPosNotify: &DPosNotify{
Vote: voteItem,
HeightStop: 200,
HashStop: []byte("abcdef121212"),
NotifyTimestamp: now,
NotifyNodeAddress: privValidator.GetAddress(),
NotifyNodeIndex: int32(0),
},
}
err = privValidator.SignNotify(chainID, notify)
require.Nil(t, err)
notify2 := notify.Copy()
err = notify2.Verify(chainID, privValidator.PubKey)
require.Nil(t, err)
remove(filename)
privValidator2 := LoadOrGenPrivValidatorFS(filename)
require.NotNil(t, privValidator2)
err = notify2.Verify(chainID, privValidator2.PubKey)
require.NotNil(t, err)
remove(filename)
}
func TestSignMsg(t *testing.T) {
filename := "./tmp_priv_validator.json"
save(filename, privValidatorFile)
privValidator := LoadOrGenPrivValidatorFS(filename)
byteCB := []byte("asdfadsasf")
sig, err := privValidator.SignMsg(byteCB)
require.Nil(t, err)
assert.True(t, 0 < len(sig.Bytes()))
remove(filename)
}
func TestVrf(t *testing.T) {
filename := "./tmp_priv_validator.json"
save(filename, privValidatorFile)
privValidator := LoadOrGenPrivValidatorFS(filename)
input := []byte("abcdefghijklmn")
hash, proof := privValidator.VrfEvaluate(input)
assert.True(t, 32 == len(hash))
assert.True(t, 0 < len(proof))
result := privValidator.VrfProof(privValidator.PubKey.Bytes(), input, hash, proof)
assert.True(t, result)
remove(filename)
}
func TestSignTx(t *testing.T) {
filename := "./tmp_priv_validator.json"
save(filename, privValidatorFile)
privValidator := LoadOrGenPrivValidatorFS(filename)
tx := &types.Transaction{}
privValidator.SignTx(tx)
assert.True(t, types.SECP256K1 == tx.Signature.Ty)
assert.True(t, bytes.Equal(privValidator.PubKey.Bytes(), tx.Signature.Pubkey))
assert.True(t, 0 < len(tx.Signature.Signature))
remove(filename)
}
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"time"
"github.com/33cn/chain33/common/address"
"github.com/33cn/chain33/common/crypto"
"github.com/33cn/chain33/common/log/log15"
)
// error defines
var (
ErrNotifyInvalidValidatorAddress = errors.New("Invalid validator address for notify")
ErrNotifyInvalidValidatorIndex = errors.New("Invalid validator index for notify")
ErrNotifyInvalidSignature = errors.New("Invalid notify signature")
ErrVoteInvalidValidatorIndex = errors.New("Invalid validator index for vote")
ErrVoteInvalidValidatorAddress = errors.New("Invalid validator address for vote")
ErrVoteInvalidSignature = errors.New("Invalid vote signature")
ErrVoteNil = errors.New("Nil vote")
votelog = log15.New("module", "tendermint-vote")
ConsensusCrypto crypto.Crypto
SecureConnCrypto crypto.Crypto
)
// Signable is an interface for all signable things.
// It typically removes signatures before serializing.
type Signable interface {
WriteSignBytes(chainID string, w io.Writer, n *int, err *error)
}
// SignBytes is a convenience method for getting the bytes to sign of a Signable.
func SignBytes(chainID string, o Signable) []byte {
buf, n, err := new(bytes.Buffer), new(int), new(error)
o.WriteSignBytes(chainID, buf, n, err)
if *err != nil {
PanicCrisis(err)
}
return buf.Bytes()
}
// Vote Represents a vote from validators for consensus.
type Vote struct {
*DPosVote
}
// WriteSignBytes ...
func (vote *Vote) WriteSignBytes(chainID string, w io.Writer, n *int, err *error) {
if *err != nil {
return
}
canonical := CanonicalJSONOnceVote{
chainID,
CanonicalVote(vote),
}
byteVote, e := json.Marshal(&canonical)
if e != nil {
*err = e
votelog.Error("vote WriteSignBytes marshal failed", "err", e)
return
}
number, writeErr := w.Write(byteVote)
*n = number
*err = writeErr
}
// Copy ...
func (vote *Vote) Copy() *Vote {
voteCopy := *vote
return &voteCopy
}
func (vote *Vote) String() string {
if vote == nil {
return "nil-Vote"
}
return fmt.Sprintf("Vote{VotedNodeIndex:%v, VotedNodeAddr:%X,Cycle[%v,%v],Period[%v,%v],StartHeight:%v,VoteId:%X,VoteTimeStamp:%v,VoteNodeIndex:%v,VoteNodeAddr:%X,Sig:%X}",
vote.VoteItem.VotedNodeIndex,
Fingerprint(vote.VoteItem.VotedNodeAddress),
vote.VoteItem.CycleStart,
vote.VoteItem.CycleStop,
vote.VoteItem.PeriodStart,
vote.VoteItem.PeriodStop,
vote.VoteItem.Height,
Fingerprint(vote.VoteItem.VoteID),
CanonicalTime(time.Unix(0, vote.VoteTimestamp)),
vote.VoterNodeIndex,
Fingerprint(vote.VoterNodeAddress),
Fingerprint(vote.Signature),
)
}
// Verify ...
func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error {
addr := address.PubKeyToAddress(pubKey.Bytes()).Hash160[:]
if !bytes.Equal(addr, vote.VoterNodeAddress) {
return ErrVoteInvalidValidatorAddress
}
sig, err := ConsensusCrypto.SignatureFromBytes(vote.Signature)
if err != nil {
votelog.Error("vote Verify failed", "err", err)
return err
}
if !pubKey.VerifyBytes(SignBytes(chainID, vote), sig) {
return ErrVoteInvalidSignature
}
return nil
}
// Hash ...
func (vote *Vote) Hash() []byte {
if vote == nil {
//votelog.Error("vote hash is nil")
return nil
}
bytes, err := json.Marshal(vote)
if err != nil {
votelog.Error("vote hash marshal failed", "err", err)
return nil
}
return crypto.Ripemd160(bytes)
}
// Notify Represents a notify from validators for consensus.
type Notify struct {
*DPosNotify
}
// WriteSignBytes ...
func (notify *Notify) WriteSignBytes(chainID string, w io.Writer, n *int, err *error) {
if *err != nil {
return
}
canonical := CanonicalJSONOnceNotify{
chainID,
CanonicalNotify(notify),
}
byteVote, e := json.Marshal(&canonical)
if e != nil {
*err = e
votelog.Error("vote WriteSignBytes marshal failed", "err", e)
return
}
number, writeErr := w.Write(byteVote)
*n = number
*err = writeErr
}
// Copy ...
func (notify *Notify) Copy() *Notify {
notifyCopy := *notify
return &notifyCopy
}
func (notify *Notify) String() string {
if notify == nil {
return "nil-notify"
}
return fmt.Sprintf("Notify{VotedNodeIndex:%v, VotedNodeAddr:%X,Cycle[%v,%v],Period[%v,%v],StartHeight:%v,VoteId:%X,NotifyTimeStamp:%v,HeightStop:%v,NotifyNodeIndex:%v,NotifyNodeAddr:%X,Sig:%X}",
notify.Vote.VotedNodeIndex,
Fingerprint(notify.Vote.VotedNodeAddress),
notify.Vote.CycleStart,
notify.Vote.CycleStop,
notify.Vote.PeriodStart,
notify.Vote.PeriodStop,
notify.Vote.Height,
Fingerprint(notify.Vote.VoteID),
CanonicalTime(time.Unix(0, notify.NotifyTimestamp)),
notify.HeightStop,
notify.NotifyNodeIndex,
Fingerprint(notify.NotifyNodeAddress),
Fingerprint(notify.Signature),
)
}
// Verify ...
func (notify *Notify) Verify(chainID string, pubKey crypto.PubKey) error {
addr := address.PubKeyToAddress(pubKey.Bytes()).Hash160[:]
if !bytes.Equal(addr, notify.NotifyNodeAddress) {
return ErrNotifyInvalidValidatorAddress
}
sig, err := ConsensusCrypto.SignatureFromBytes(notify.Signature)
if err != nil {
votelog.Error("Notify Verify failed", "err", err)
return err
}
if !pubKey.VerifyBytes(SignBytes(chainID, notify), sig) {
return ErrNotifyInvalidSignature
}
return nil
}
// Hash ...
func (notify *Notify) Hash() []byte {
if notify == nil {
//votelog.Error("vote hash is nil")
return nil
}
bytes, err := json.Marshal(notify)
if err != nil {
votelog.Error("vote hash marshal failed", "err", err)
return nil
}
return crypto.Ripemd160(bytes)
}
package types
import (
"encoding/json"
"github.com/33cn/chain33/common/crypto"
"github.com/33cn/chain33/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"testing"
"time"
)
func init() {
//to use VRF, SECP256K1 public/private keys are required
cr, err := crypto.New(types.GetSignName("", types.SECP256K1))
if err != nil {
panic("init ConsensusCrypto failed.")
}
ConsensusCrypto = cr
}
func TestVote(t *testing.T) {
filename := "./tmp_priv_validator.json"
save(filename, privValidatorFile)
privValidator := LoadOrGenPrivValidatorFS(filename)
now := time.Now().Unix()
//task := dpos.DecideTaskByTime(now)
//generate a vote and sign it
voteItem := &VoteItem{
VotedNodeAddress: privValidator.Address,
VotedNodeIndex: int32(0),
Cycle: 100,
CycleStart: 18888,
CycleStop: 28888,
PeriodStart: 20000,
PeriodStop: 21000,
Height: 100,
}
encode, err := json.Marshal(voteItem)
if err != nil {
panic("Marshal vote failed.")
}
voteItem.VoteID = crypto.Ripemd160(encode)
vote := &Vote{
DPosVote: &DPosVote{
VoteItem: voteItem,
VoteTimestamp: now,
VoterNodeAddress: privValidator.GetAddress(),
VoterNodeIndex: int32(0),
},
}
assert.True(t, 0 == len(vote.Signature))
chainID := "test-chain-Ep9EcD"
privValidator.SignVote(chainID, vote)
assert.True(t, 0 < len(vote.Signature))
vote2 := vote.Copy()
err = vote2.Verify(chainID, privValidator.PubKey)
require.Nil(t, err)
assert.True(t, 0 < len(vote.Hash()))
remove(filename)
}
func TestNotify(t *testing.T) {
filename := "./tmp_priv_validator.json"
save(filename, privValidatorFile)
privValidator := LoadOrGenPrivValidatorFS(filename)
now := time.Now().Unix()
//task := dpos.DecideTaskByTime(now)
//generate a vote and sign it
voteItem := &VoteItem{
VotedNodeAddress: privValidator.Address,
VotedNodeIndex: int32(0),
Cycle: 100,
CycleStart: 18888,
CycleStop: 28888,
PeriodStart: 20000,
PeriodStop: 21000,
Height: 100,
}
encode, err := json.Marshal(voteItem)
if err != nil {
panic("Marshal vote failed.")
}
voteItem.VoteID = crypto.Ripemd160(encode)
chainID := "test-chain-Ep9EcD"
notify := &Notify{
DPosNotify: &DPosNotify{
Vote: voteItem,
HeightStop: 200,
HashStop: []byte("abcdef121212"),
NotifyTimestamp: now,
NotifyNodeAddress: privValidator.GetAddress(),
NotifyNodeIndex: int32(0),
},
}
err = privValidator.SignNotify(chainID, notify)
require.Nil(t, err)
notify2 := notify.Copy()
err = notify2.Verify(chainID, privValidator.PubKey)
require.Nil(t, err)
assert.True(t, 0 < len(notify.Hash()))
remove(filename)
}
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types
import (
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"sync"
"syscall"
"time"
)
const (
// RFC3339Millis ...
RFC3339Millis = "2006-01-02T15:04:05.000Z" // forced milliseconds
timeFormat = RFC3339Millis
)
var (
randgen *rand.Rand
// Fmt ...
Fmt = fmt.Sprintf
once sync.Once
)
// Init ...
func Init() {
once.Do(func() {
if randgen == nil {
randgen = rand.New(rand.NewSource(time.Now().UnixNano()))
}
})
}
// WriteFile ...
func WriteFile(filePath string, contents []byte, mode os.FileMode) error {
return ioutil.WriteFile(filePath, contents, mode)
}
// WriteFileAtomic ...
func WriteFileAtomic(filePath string, newBytes []byte, mode os.FileMode) error {
dir := filepath.Dir(filePath)
f, err := ioutil.TempFile(dir, "")
if err != nil {
return err
}
_, err = f.Write(newBytes)
if err == nil {
err = f.Sync()
}
if closeErr := f.Close(); err == nil {
err = closeErr
}
if permErr := os.Chmod(f.Name(), mode); err == nil {
err = permErr
}
if err == nil {
err = os.Rename(f.Name(), filePath)
}
// any err should result in full cleanup
if err != nil {
if er := os.Remove(f.Name()); er != nil {
fmt.Printf("WriteFileAtomic Remove failed:%v", er)
}
}
return err
}
// Tempfile ...
func Tempfile(prefix string) (*os.File, string) {
file, err := ioutil.TempFile("", prefix)
if err != nil {
panic(err)
}
return file, file.Name()
}
// Fingerprint ...
func Fingerprint(slice []byte) []byte {
fingerprint := make([]byte, 6)
copy(fingerprint, slice)
return fingerprint
}
// Kill ...
func Kill() error {
p, err := os.FindProcess(os.Getpid())
if err != nil {
return err
}
return p.Signal(syscall.SIGTERM)
}
// Exit ...
func Exit(s string) {
fmt.Printf(s + "\n")
os.Exit(1)
}
// Parallel ...
func Parallel(tasks ...func()) {
var wg sync.WaitGroup
wg.Add(len(tasks))
for _, task := range tasks {
go func(task func()) {
task()
wg.Done()
}(task)
}
wg.Wait()
}
// MinInt ...
func MinInt(a, b int) int {
if a < b {
return a
}
return b
}
// MaxInt ...
func MaxInt(a, b int) int {
if a > b {
return a
}
return b
}
// RandIntn ...
func RandIntn(n int) int {
if n <= 0 {
panic("invalid argument to Intn")
}
if n <= 1<<31-1 {
//randMux.Lock()
i32 := randgen.Int31n(int32(n))
//randMux.Unlock()
return int(i32)
}
//randMux.Lock()
i64 := randgen.Int63n(int64(n))
//randMux.Unlock()
return int(i64)
}
// RandUint32 ...
func RandUint32() uint32 {
//randMux.Lock()
u32 := randgen.Uint32()
//randMux.Unlock()
return u32
}
// RandInt63n ...
func RandInt63n(n int64) int64 {
//randMux.Lock()
i64 := randgen.Int63n(n)
//randMux.Unlock()
return i64
}
// PanicSanity ...
func PanicSanity(v interface{}) {
panic(Fmt("Panicked on a Sanity Check: %v", v))
}
// PanicCrisis ...
func PanicCrisis(v interface{}) {
panic(Fmt("Panicked on a Crisis: %v", v))
}
// PanicQ ...
func PanicQ(v interface{}) {
panic(Fmt("Panicked questionably: %v", v))
}
package types
import (
"bytes"
"fmt"
"os"
"os/signal"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func init() {
Init()
}
func TestWriteFile(t *testing.T) {
filename := "./tmp_priv_validator.json"
err := WriteFile(filename, []byte(privValidatorFile), 0664)
require.Nil(t, err)
file, err := os.Stat(filename)
require.Nil(t, err)
//assert.True(t, file.Mode() == 077)
fmt.Println(file.Name())
fmt.Println(file.Mode())
assert.True(t, file.Name() == "tmp_priv_validator.json")
assert.True(t, file.Mode() == 0664)
remove(filename)
}
func TestWriteFileAtomic(t *testing.T) {
filename := "./tmp_priv_validator.json"
err := WriteFileAtomic(filename, []byte(privValidatorFile), 0664)
require.Nil(t, err)
file, err := os.Stat(filename)
require.Nil(t, err)
//assert.True(t, file.Mode() == 077)
fmt.Println(file.Name())
fmt.Println(file.Mode())
assert.True(t, file.Name() == "tmp_priv_validator.json")
assert.True(t, file.Mode() == 0664)
remove(filename)
}
func TestTempfile(t *testing.T) {
filename := "tmp_priv_validator.json"
file, name := Tempfile(filename)
fmt.Println(name)
require.NotNil(t, file)
_, err := file.Write([]byte(privValidatorFile))
if err == nil {
err = file.Sync()
}
require.Nil(t, err)
if closeErr := file.Close(); err == nil {
err = closeErr
}
require.Nil(t, err)
if permErr := os.Chmod(file.Name(), 0777); err == nil {
err = permErr
}
require.Nil(t, err)
remove(name)
}
func TestFingerprint(t *testing.T) {
arr := []byte("abdcdfasdf")
finger := Fingerprint(arr)
assert.True(t, bytes.Equal(finger, arr[0:6]))
}
func TestKill(t *testing.T) {
c := make(chan os.Signal, 1)
signal.Notify(c)
go Kill()
s := <-c
assert.True(t, s.String() == "terminated")
}
var (
goIndex = 0
goIndexMutex sync.Mutex
goSum = 0
goSumMutex sync.Mutex
)
func test() {
goIndexMutex.Lock()
goIndex++
index := goIndex
goIndexMutex.Unlock()
time.Sleep(time.Second * time.Duration(index))
goSumMutex.Lock()
goSum++
goSumMutex.Unlock()
}
func TestParallel(t *testing.T) {
f1 := test
f1()
f2 := test
f2()
assert.True(t, goSum == 2)
/*
goSumMutex.Lock()
goSum = 0
goSumMutex.Unlock()
Parallel(f1, f2)
goSumMutex.Lock()
assert.True(t, goSum == 2)
goSumMutex.Unlock()
*/
}
func TestRandInt63n(t *testing.T) {
a := RandInt63n(10)
assert.True(t, a < 10)
b := RandInt63n(9999999999999999)
assert.True(t, b < 9999999999999999)
}
func TestRandIntn(t *testing.T) {
a := RandIntn(10)
assert.True(t, a < 10)
b := RandIntn(9999999999999)
assert.True(t, b < 9999999999999)
}
func TestRandUint32(t *testing.T) {
a := RandUint32()
assert.True(t, a >= 0)
b := RandUint32()
assert.True(t, b >= 0)
}
func TestPanicSanity(t *testing.T) {
defer func() {
if r := recover(); r != nil {
//fmt.Println(r)
assert.True(t, strings.HasPrefix(r.(string), "Panicked on a Sanity Check: "))
}
}()
PanicSanity("hello")
}
func TestPanicCrisis(t *testing.T) {
defer func() {
if r := recover(); r != nil {
//fmt.Println(r)
assert.True(t, strings.HasPrefix(r.(string), "Panicked on a Crisis: "))
}
}()
PanicCrisis("hello")
}
func TestPanicQ(t *testing.T) {
defer func() {
if r := recover(); r != nil {
//fmt.Println(r)
assert.True(t, strings.HasPrefix(r.(string), "Panicked questionably: "))
}
}()
PanicQ("hello")
}
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types
import (
"bytes"
"sort"
"strings"
"github.com/33cn/chain33/common/crypto"
"github.com/33cn/chain33/common/merkle"
)
// Validator ...
type Validator struct {
Address []byte `json:"address"`
PubKey []byte `json:"pub_key"`
}
// NewValidator ...
func NewValidator(pubKey crypto.PubKey) *Validator {
return &Validator{
Address: GenAddressByPubKey(pubKey),
PubKey: pubKey.Bytes(),
}
}
// Copy creates a new copy of the validator so we can mutate accum.
// Panics if the validator is nil.
func (v *Validator) Copy() *Validator {
vCopy := *v
return &vCopy
}
func (v *Validator) String() string {
if v == nil {
return "nil-Validator"
}
return Fmt("Validator{%v %v}",
v.Address,
v.PubKey)
}
// Hash computes the unique ID of a validator with a given voting power.
// It excludes the Accum value, which changes with every round.
func (v *Validator) Hash() []byte {
hashBytes := v.Address
hashBytes = append(hashBytes, v.PubKey...)
return crypto.Ripemd160(hashBytes)
}
// ValidatorSet represent a set of *Validator at a given height.
// The validators can be fetched by address or index.
// The index is in order of .Address, so the indices are fixed
// for all rounds of a given blockchain height.
// On the other hand, the .AccumPower of each validator and
// the designated .GetProposer() of a set changes every round,
// upon calling .IncrementAccum().
// NOTE: Not goroutine-safe.
// NOTE: All get/set to validators should copy the value for safety.
// TODO: consider validator Accum overflow
type ValidatorSet struct {
// NOTE: persisted via reflect, must be exported.
Validators []*Validator `json:"validators"`
}
// NewValidatorSet ...
func NewValidatorSet(vals []*Validator) *ValidatorSet {
validators := make([]*Validator, len(vals))
for i, val := range vals {
validators[i] = val.Copy()
}
sort.Sort(ValidatorsByAddress(validators))
vs := &ValidatorSet{
Validators: validators,
}
return vs
}
// Copy ...
func (valSet *ValidatorSet) Copy() *ValidatorSet {
validators := make([]*Validator, len(valSet.Validators))
for i, val := range valSet.Validators {
// NOTE: must copy, since IncrementAccum updates in place.
validators[i] = val.Copy()
}
return &ValidatorSet{
Validators: validators,
}
}
// HasAddress ...
func (valSet *ValidatorSet) HasAddress(address []byte) bool {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
return idx != len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address)
}
// GetByAddress ...
func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
if idx != len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) {
return idx, valSet.Validators[idx].Copy()
}
return -1, nil
}
// GetByIndex returns the validator by index.
// It returns nil values if index < 0 or
// index >= len(ValidatorSet.Validators)
func (valSet *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) {
if index < 0 || index >= len(valSet.Validators) {
return nil, nil
}
val = valSet.Validators[index]
return val.Address, val.Copy()
}
// Size ...
func (valSet *ValidatorSet) Size() int {
return len(valSet.Validators)
}
// Hash ...
func (valSet *ValidatorSet) Hash() []byte {
if len(valSet.Validators) == 0 {
return nil
}
hashables := make([][]byte, len(valSet.Validators))
for i, val := range valSet.Validators {
hashables[i] = val.Hash()
}
return merkle.GetMerkleRoot(hashables)
}
// Add ...
func (valSet *ValidatorSet) Add(val *Validator) (added bool) {
val = val.Copy()
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(val.Address, valSet.Validators[i].Address) <= 0
})
if idx == len(valSet.Validators) {
valSet.Validators = append(valSet.Validators, val)
return true
} else if bytes.Equal(valSet.Validators[idx].Address, val.Address) {
return false
} else {
newValidators := make([]*Validator, len(valSet.Validators)+1)
copy(newValidators[:idx], valSet.Validators[:idx])
newValidators[idx] = val
copy(newValidators[idx+1:], valSet.Validators[idx:])
valSet.Validators = newValidators
return true
}
}
// Update ...
func (valSet *ValidatorSet) Update(val *Validator) (updated bool) {
index, sameVal := valSet.GetByAddress(val.Address)
if sameVal == nil {
return false
}
valSet.Validators[index] = val.Copy()
return true
}
// Remove ...
func (valSet *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
if idx == len(valSet.Validators) || !bytes.Equal(valSet.Validators[idx].Address, address) {
return nil, false
}
removedVal := valSet.Validators[idx]
newValidators := valSet.Validators[:idx]
if idx+1 < len(valSet.Validators) {
newValidators = append(newValidators, valSet.Validators[idx+1:]...)
}
valSet.Validators = newValidators
return removedVal, true
}
// Iterate ...
func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) {
for i, val := range valSet.Validators {
stop := fn(i, val.Copy())
if stop {
break
}
}
}
func (valSet *ValidatorSet) String() string {
return valSet.StringIndented("")
}
// StringIndented ...
func (valSet *ValidatorSet) StringIndented(indent string) string {
if valSet == nil {
return "nil-ValidatorSet"
}
valStrings := []string{}
valSet.Iterate(func(index int, val *Validator) bool {
valStrings = append(valStrings, val.String())
return false
})
return Fmt(`ValidatorSet{
%s Validators:
%s %v
%s}`,
indent,
indent, strings.Join(valStrings, "\n"+indent+" "),
indent)
}
// ValidatorsByAddress implements sort.Interface for sorting validators by address.
type ValidatorsByAddress []*Validator
func (vs ValidatorsByAddress) Len() int {
return len(vs)
}
func (vs ValidatorsByAddress) Less(i, j int) bool {
return bytes.Compare(vs[i].Address, vs[j].Address) == -1
}
func (vs ValidatorsByAddress) Swap(i, j int) {
it := vs[i]
vs[i] = vs[j]
vs[j] = it
}
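A minimal usage sketch of the ValidatorSet described above, assuming the same import path as elsewhere in this commit; the addresses and pubkeys below are illustrative only.
package main
import (
	"fmt"

	ttypes "github.com/33cn/plugin/plugin/consensus/dpos/types"
)
func main() {
	// NewValidatorSet copies the validators and sorts them by address.
	valset := ttypes.NewValidatorSet([]*ttypes.Validator{
		{Address: []byte{0x03}, PubKey: []byte("pk-c")},
		{Address: []byte{0x01}, PubKey: []byte("pk-a")},
		{Address: []byte{0x02}, PubKey: []byte("pk-b")},
	})
	idx, val := valset.GetByAddress([]byte{0x02})
	fmt.Println(idx, string(val.PubKey)) // 1 pk-b
	added := valset.Add(&ttypes.Validator{Address: []byte{0x04}, PubKey: []byte("pk-d")})
	fmt.Println(added, valset.Size(), len(valset.Hash())) // true 4 <merkle root length>
}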
package types
import (
"bytes"
"encoding/hex"
"fmt"
"strings"
"testing"
"github.com/33cn/chain33/common/crypto"
"github.com/33cn/chain33/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
pubkey1 = "027848E7FA630B759DB406940B5506B666A344B1060794BBF314EB459D40881BB3"
pubkey2 = "03F4AB6659E61E8512C9A24AC385CC1AC4D52B87D10ADBDF060086EA82BE62CDDE"
pubkey3 = "03EF0E1D3112CF571743A3318125EDE2E52A4EB904BCBAA4B1F75020C2846A7EB4"
pubkey11 = "03541AB9887951C038273648545072E5B6A46A639BFF535F3957E8150CBE2A70D7"
pubkey12 = "03F2A7AFFA090763C42B370C6F33CC3E9B6140228ABAF0591240F3B88E8792F890"
)
var (
val1 *Validator
val2 *Validator
val3 *Validator
val11 *Validator
val12 *Validator
)
func init() {
//VRF requires SECP256K1 public/private key pairs
cr, err := crypto.New(types.GetSignName("", types.SECP256K1))
if err != nil {
panic("init ConsensusCrypto failed.")
}
ConsensusCrypto = cr
pkbytes, _ := hex.DecodeString(pubkey1)
pk1, _ := ConsensusCrypto.PubKeyFromBytes(pkbytes)
pkbytes, _ = hex.DecodeString(pubkey2)
pk2, _ := ConsensusCrypto.PubKeyFromBytes(pkbytes)
pkbytes, _ = hex.DecodeString(pubkey3)
pk3, _ := ConsensusCrypto.PubKeyFromBytes(pkbytes)
val1 = NewValidator(pk1)
val2 = NewValidator(pk2)
val3 = NewValidator(pk3)
pkbytes, _ = hex.DecodeString(pubkey11)
pk11, _ := ConsensusCrypto.PubKeyFromBytes(pkbytes)
val11 = NewValidator(pk11)
pkbytes, _ = hex.DecodeString(pubkey12)
pk12, _ := ConsensusCrypto.PubKeyFromBytes(pkbytes)
val12 = NewValidator(pk12)
}
func TestValidator(t *testing.T) {
cval1 := val1.Copy()
assert.True(t, bytes.Equal(val1.PubKey, cval1.PubKey))
assert.True(t, bytes.Equal(val1.Address, cval1.Address))
assert.True(t, strings.HasPrefix(val2.String(), "Validator{"))
assert.True(t, len(val3.Hash()) > 0)
}
func match(index int, val *Validator) bool {
return bytes.Equal(val.Address, val1.Address)
}
func TestValidatorSet(t *testing.T) {
var vals []*Validator
vals = append(vals, val1)
vals = append(vals, val2)
vals = append(vals, val3)
valset := NewValidatorSet(vals)
//03f4ab6659e61e8512c9a24ac385cc1ac4d52b87d10adbdf060086ea82be62cdde
//027848e7fa630b759db406940b5506b666a344b1060794bbf314eb459d40881bb3
//03ef0e1d3112cf571743a3318125ede2e52a4eb904bcbaa4b1f75020c2846a7eb4
for _, v := range valset.Validators {
fmt.Println(hex.EncodeToString(v.PubKey))
}
assert.True(t, bytes.Equal(valset.Validators[0].PubKey, val2.PubKey))
assert.True(t, bytes.Equal(valset.Validators[1].PubKey, val1.PubKey))
assert.True(t, bytes.Equal(valset.Validators[2].PubKey, val3.PubKey))
assert.True(t, valset.HasAddress(val1.Address))
assert.True(t, valset.HasAddress(val2.Address))
assert.True(t, valset.HasAddress(val3.Address))
inx, val := valset.GetByAddress(val1.Address)
assert.True(t, inx == 1 && bytes.Equal(val.PubKey, val1.PubKey))
inx, val = valset.GetByAddress(val2.Address)
assert.True(t, inx == 0 && bytes.Equal(val.PubKey, val2.PubKey))
inx, val = valset.GetByAddress(val3.Address)
assert.True(t, inx == 2 && bytes.Equal(val.PubKey, val3.PubKey))
addr, val := valset.GetByIndex(1)
assert.True(t, bytes.Equal(val.PubKey, val1.PubKey))
assert.True(t, bytes.Equal(addr, val1.Address))
assert.True(t, 3 == valset.Size())
assert.True(t, 0 < len(valset.Hash()))
assert.True(t, valset.Add(val1) == false)
assert.True(t, valset.Size() == 3)
assert.True(t, valset.Add(val11) == true)
assert.True(t, valset.Size() == 4)
assert.True(t, valset.Update(val11) == true)
assert.True(t, valset.Size() == 4)
assert.True(t, valset.Update(val12) == false)
assert.True(t, valset.Size() == 4)
val, flag := valset.Remove(val11.Address)
assert.True(t, bytes.Equal(val.PubKey, val11.PubKey))
assert.True(t, flag == true)
val, flag = valset.Remove(val12.Address)
assert.True(t, flag == false)
require.Nil(t, val)
assert.True(t, valset.HasAddress(val1.Address) == true)
//fmt.Println(valset.String())
//fmt.Println(valset.StringIndented(" "))
valset.Iterate(match)
}
func TestValidatorsByAddress(t *testing.T) {
var arr ValidatorsByAddress
arr = append(arr, val1)
arr = append(arr, val2)
arr = append(arr, val3)
assert.True(t, arr.Len() == 3)
assert.True(t, arr.Less(0, 1) == false)
assert.True(t, arr.Less(0, 2) == true)
arr.Swap(0, 1)
assert.True(t, bytes.Equal(arr[0].PubKey, val2.PubKey))
}
func TestValidatorSetException(t *testing.T) {
var vals []*Validator
valset := NewValidatorSet(vals)
assert.True(t, len(valset.Validators) == 0)
}
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dpos
import (
"bytes"
"encoding/json"
"fmt"
"github.com/33cn/chain33/common/address"
ttypes "github.com/33cn/plugin/plugin/consensus/dpos/types"
dty "github.com/33cn/plugin/plugin/dapp/dposvote/types"
)
const (
//ShuffleTypeNoVrf shuffle type: NoVrf, use default address order
ShuffleTypeNoVrf = iota
//ShuffleTypeVrf shuffle type: Vrf
ShuffleTypeVrf
//ShuffleTypePartVrf shuffle type: PartVrf
ShuffleTypePartVrf
)
// ValidatorMgr ...
type ValidatorMgr struct {
// Immutable
ChainID string
// Validators are persisted to the database separately every time they change,
// so we can query for historical validator sets.
// Note that if s.LastBlockHeight causes a valset change,
// we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1
Validators *ttypes.ValidatorSet
VrfValidators *ttypes.ValidatorSet
NoVrfValidators *ttypes.ValidatorSet
LastCycleBoundaryInfo *dty.DposCBInfo
ShuffleCycle int64
ShuffleType int64 //0-no vrf 1-vrf 2-part vrf
// The latest AppHash we've received from calling abci.Commit()
AppHash []byte
}
// Copy makes a copy of the State for mutating.
func (s ValidatorMgr) Copy() ValidatorMgr {
mgr := ValidatorMgr{
ChainID: s.ChainID,
Validators: s.Validators.Copy(),
AppHash: s.AppHash,
ShuffleCycle: s.ShuffleCycle,
ShuffleType: s.ShuffleType,
//VrfValidators: s.VrfValidators.Copy(),
//NoVrfValidators: s.NoVrfValidators.Copy(),
//LastCycleBoundaryInfo: &dty.DposCBInfo{
// Cycle: s.LastCycleBoundaryInfo.Cycle,
// StopHeight: s.LastCycleBoundaryInfo.StopHeight,
// StopHash: s.LastCycleBoundaryInfo.StopHash,
// Pubkey: s.LastCycleBoundaryInfo.Pubkey,
// Signature: s.LastCycleBoundaryInfo.Signature,
//},
}
if s.LastCycleBoundaryInfo != nil {
mgr.LastCycleBoundaryInfo = &dty.DposCBInfo{
Cycle: s.LastCycleBoundaryInfo.Cycle,
StopHeight: s.LastCycleBoundaryInfo.StopHeight,
StopHash: s.LastCycleBoundaryInfo.StopHash,
Pubkey: s.LastCycleBoundaryInfo.Pubkey,
Signature: s.LastCycleBoundaryInfo.Signature,
}
}
if s.VrfValidators != nil {
mgr.VrfValidators = s.VrfValidators.Copy()
}
if s.NoVrfValidators != nil {
mgr.NoVrfValidators = s.NoVrfValidators.Copy()
}
return mgr
}
// Equals returns true if the States are identical.
func (s ValidatorMgr) Equals(s2 ValidatorMgr) bool {
return bytes.Equal(s.Bytes(), s2.Bytes())
}
// Bytes serializes the State as JSON.
func (s ValidatorMgr) Bytes() []byte {
sbytes, err := json.Marshal(s)
if err != nil {
fmt.Printf("Error reading GenesisDoc: %v", err)
return nil
}
return sbytes
}
// IsEmpty returns true if the State is equal to the empty State.
func (s ValidatorMgr) IsEmpty() bool {
return s.Validators == nil // XXX can't compare to Empty
}
// GetValidators returns the current validator set.
func (s ValidatorMgr) GetValidators() (current *ttypes.ValidatorSet) {
return s.Validators
}
// MakeGenesisValidatorMgr creates validators from ttypes.GenesisDoc.
func MakeGenesisValidatorMgr(genDoc *ttypes.GenesisDoc) (ValidatorMgr, error) {
err := genDoc.ValidateAndComplete()
if err != nil {
return ValidatorMgr{}, fmt.Errorf("Error in genesis file: %v", err)
}
// Make validators slice
validators := make([]*ttypes.Validator, len(genDoc.Validators))
for i, val := range genDoc.Validators {
pubKey, err := ttypes.PubKeyFromString(val.PubKey.Data)
if err != nil {
return ValidatorMgr{}, fmt.Errorf("Error validate[%v] in genesis file: %v", i, err)
}
// Make validator
validators[i] = &ttypes.Validator{
Address: address.PubKeyToAddress(pubKey.Bytes()).Hash160[:],
PubKey: pubKey.Bytes(),
}
}
return ValidatorMgr{
ChainID: genDoc.ChainID,
Validators: ttypes.NewValidatorSet(validators),
AppHash: genDoc.AppHash,
}, nil
}
// GetValidatorByIndex method
func (s *ValidatorMgr) GetValidatorByIndex(index int) (addr []byte, val *ttypes.Validator) {
if index < 0 || index >= len(s.Validators.Validators) {
return nil, nil
}
if s.ShuffleType == ShuffleTypeNoVrf {
val = s.Validators.Validators[index]
return val.Address, val.Copy()
} else if s.ShuffleType == ShuffleTypeVrf {
val = s.VrfValidators.Validators[index]
return address.PubKeyToAddress(val.PubKey).Hash160[:], val.Copy()
} else if s.ShuffleType == ShuffleTypePartVrf {
if index < len(s.VrfValidators.Validators) {
val = s.VrfValidators.Validators[index]
return address.PubKeyToAddress(val.PubKey).Hash160[:], val.Copy()
}
val = s.NoVrfValidators.Validators[index-len(s.VrfValidators.Validators)]
return address.PubKeyToAddress(val.PubKey).Hash160[:], val.Copy()
}
return nil, nil
}
// GetIndexByPubKey method
func (s *ValidatorMgr) GetIndexByPubKey(pubkey []byte) (index int) {
if nil == pubkey {
return -1
}
index = -1
if s.ShuffleType == ShuffleTypeNoVrf {
for i := 0; i < s.Validators.Size(); i++ {
if bytes.Equal(s.Validators.Validators[i].PubKey, pubkey) {
index = i
return index
}
}
} else if s.ShuffleType == ShuffleTypeVrf {
for i := 0; i < s.VrfValidators.Size(); i++ {
if bytes.Equal(s.VrfValidators.Validators[i].PubKey, pubkey) {
index = i
return index
}
}
} else if s.ShuffleType == ShuffleTypePartVrf {
for i := 0; i < s.VrfValidators.Size(); i++ {
if bytes.Equal(s.VrfValidators.Validators[i].PubKey, pubkey) {
index = i
return index
}
}
for j := 0; j < s.NoVrfValidators.Size(); j++ {
if bytes.Equal(s.NoVrfValidators.Validators[j].PubKey, pubkey) {
index = j + s.VrfValidators.Size()
return index
}
}
}
return index
}
// FillVoteItem method
func (s *ValidatorMgr) FillVoteItem(voteItem *ttypes.VoteItem) {
if s.LastCycleBoundaryInfo != nil {
voteItem.LastCBInfo = &ttypes.CycleBoundaryInfo{
Cycle: s.LastCycleBoundaryInfo.Cycle,
StopHeight: s.LastCycleBoundaryInfo.StopHeight,
StopHash: s.LastCycleBoundaryInfo.StopHash,
}
}
voteItem.ShuffleType = s.ShuffleType
for i := 0; s.Validators != nil && i < s.Validators.Size(); i++ {
node := &ttypes.SuperNode{
PubKey: s.Validators.Validators[i].PubKey,
Address: s.Validators.Validators[i].Address,
}
voteItem.Validators = append(voteItem.Validators, node)
}
for i := 0; s.VrfValidators != nil && i < s.VrfValidators.Size(); i++ {
node := &ttypes.SuperNode{
PubKey: s.VrfValidators.Validators[i].PubKey,
Address: s.VrfValidators.Validators[i].Address,
}
voteItem.VrfValidators = append(voteItem.VrfValidators, node)
}
for i := 0; s.NoVrfValidators != nil && i < s.NoVrfValidators.Size(); i++ {
node := &ttypes.SuperNode{
PubKey: s.NoVrfValidators.Validators[i].PubKey,
Address: s.NoVrfValidators.Validators[i].Address,
}
voteItem.NoVrfValidators = append(voteItem.NoVrfValidators, node)
}
}
// UpdateFromVoteItem method
func (s *ValidatorMgr) UpdateFromVoteItem(voteItem *ttypes.VoteItem) bool {
validators := voteItem.Validators
if len(s.Validators.Validators) != len(voteItem.Validators) {
return false
}
for i := 0; i < s.Validators.Size(); i++ {
if !bytes.Equal(validators[i].PubKey, s.Validators.Validators[i].PubKey) {
return false
}
}
if voteItem.LastCBInfo != nil {
if s.LastCycleBoundaryInfo == nil {
s.LastCycleBoundaryInfo = &dty.DposCBInfo{
Cycle: voteItem.LastCBInfo.Cycle,
StopHeight: voteItem.LastCBInfo.StopHeight,
StopHash: voteItem.LastCBInfo.StopHash,
}
} else if voteItem.LastCBInfo.Cycle != s.LastCycleBoundaryInfo.Cycle ||
voteItem.LastCBInfo.StopHeight != s.LastCycleBoundaryInfo.StopHeight ||
voteItem.LastCBInfo.StopHash != s.LastCycleBoundaryInfo.StopHash {
s.LastCycleBoundaryInfo = &dty.DposCBInfo{
Cycle: voteItem.LastCBInfo.Cycle,
StopHeight: voteItem.LastCBInfo.StopHeight,
StopHash: voteItem.LastCBInfo.StopHash,
}
}
}
s.ShuffleType = voteItem.ShuffleType
var vrfVals []*ttypes.Validator
for i := 0; i < len(voteItem.VrfValidators); i++ {
val := &ttypes.Validator{
Address: voteItem.VrfValidators[i].Address,
PubKey: voteItem.VrfValidators[i].PubKey,
}
vrfVals = append(vrfVals, val)
}
s.VrfValidators = ttypes.NewValidatorSet(vrfVals)
var noVrfVals []*ttypes.Validator
for i := 0; i < len(voteItem.NoVrfValidators); i++ {
val := &ttypes.Validator{
Address: voteItem.NoVrfValidators[i].Address,
PubKey: voteItem.NoVrfValidators[i].PubKey,
}
noVrfVals = append(noVrfVals, val)
}
s.NoVrfValidators = ttypes.NewValidatorSet(noVrfVals)
return true
}
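A minimal sketch of copying and comparing a ValidatorMgr and resolving an index under ShuffleTypeNoVrf, assuming the import paths that appear in this commit; it is illustrative only and not part of the consensus flow.
package main
import (
	"fmt"

	"github.com/33cn/plugin/plugin/consensus/dpos"
	ttypes "github.com/33cn/plugin/plugin/consensus/dpos/types"
)
func main() {
	mgr := dpos.ValidatorMgr{
		ChainID: "demo-chain",
		Validators: ttypes.NewValidatorSet([]*ttypes.Validator{
			{Address: []byte{0x01}, PubKey: []byte("pk-a")},
			{Address: []byte{0x02}, PubKey: []byte("pk-b")},
		}),
		ShuffleType: dpos.ShuffleTypeNoVrf,
	}
	// Copy duplicates the mutable state; Equals compares the JSON serialization.
	cp := mgr.Copy()
	fmt.Println(mgr.Equals(cp)) // true
	// With ShuffleTypeNoVrf the index is simply the position in Validators.
	fmt.Println(mgr.GetIndexByPubKey([]byte("pk-b"))) // 1
}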
package init
import (
_ "github.com/33cn/plugin/plugin/consensus/dpos" //auto gen
_ "github.com/33cn/plugin/plugin/consensus/para" //auto gen
_ "github.com/33cn/plugin/plugin/consensus/pbft" //auto gen
_ "github.com/33cn/plugin/plugin/consensus/raft" //auto gen
......
......@@ -66,23 +66,11 @@ minerstart=false
[mver.consensus]
fundKeyAddr = "1BQXS6TxaYYG5mADaWij4AxhZZUTpw95a5"
coinReward = 18
coinDevFund = 12
ticketPrice = 10000
powLimitBits = "0x1f00ffff"
retargetAdjustmentFactor = 4
futureBlockTime = 16
ticketFrozenTime = 5 #5s only for test
ticketWithdrawTime = 10 #10s only for test
ticketMinerWaitTime = 2 #2s only for test
maxTxNumber = 1600 #160
targetTimespan = 2304
targetTimePerBlock = 16
[mver.consensus.ForkChainParamV1]
maxTxNumber = 10000
targetTimespan = 288 #only for test
targetTimePerBlock = 2
[mver.consensus.ForkChainParamV2]
powLimitBits = "0x1f2fffff"
......
......@@ -83,23 +83,11 @@ minerstart=false
[mver.consensus]
fundKeyAddr = "1BQXS6TxaYYG5mADaWij4AxhZZUTpw95a5"
coinReward = 18
coinDevFund = 12
ticketPrice = 10000
powLimitBits = "0x1f00ffff"
retargetAdjustmentFactor = 4
futureBlockTime = 16
ticketFrozenTime = 5 #5s only for test
ticketWithdrawTime = 10 #10s only for test
ticketMinerWaitTime = 2 #2s only for test
maxTxNumber = 1600 #160
targetTimespan = 2304
targetTimePerBlock = 16
[mver.consensus.ForkChainParamV1]
maxTxNumber = 10000
targetTimespan = 288 #only for test
targetTimePerBlock = 2
[mver.consensus.ForkChainParamV2]
powLimitBits = "0x1f2fffff"
......
......@@ -53,13 +53,14 @@ func TestRaftPerf(t *testing.T) {
}
func RaftPerf() {
q, chain, s, mem, exec, cs, p2p := initEnvRaft()
defer chain.Close()
defer q.Close()
defer s.Close()
defer p2p.Close()
defer mem.Close()
defer exec.Close()
defer s.Close()
defer q.Close()
defer chain.Close()
defer cs.Close()
defer p2p.Close()
sendReplyList(q)
}
......
......@@ -36,7 +36,7 @@ var r *rand.Rand
func createConn(ip string) {
var err error
url := ip + ":8802"
url := ip + ":9802"
fmt.Println("grpc url:", url)
conn, err = grpc.Dial(url, grpc.WithInsecure())
if err != nil {
......
......@@ -70,23 +70,11 @@ minerstart=false
[mver.consensus]
fundKeyAddr = "1BQXS6TxaYYG5mADaWij4AxhZZUTpw95a5"
coinReward = 18
coinDevFund = 12
ticketPrice = 10000
powLimitBits = "0x1f00ffff"
retargetAdjustmentFactor = 4
futureBlockTime = 16
ticketFrozenTime = 5 #5s only for test
ticketWithdrawTime = 10 #10s only for test
ticketMinerWaitTime = 2 #2s only for test
maxTxNumber = 1600 #160
targetTimespan = 2304
targetTimePerBlock = 16
[mver.consensus.ForkChainParamV1]
maxTxNumber = 10000
targetTimespan = 288 #only for test
targetTimePerBlock = 2
[mver.consensus.ForkChainParamV2]
powLimitBits = "0x1f2fffff"
......
......@@ -139,7 +139,7 @@ func Put(ip string, size string, privkey string) {
fmt.Fprintln(os.Stderr, err)
return
}
url := "http://" + ip + ":8801"
url := "http://" + ip + ":9801"
if privkey == "" {
_, priv := genaddress()
privkey = common.ToHex(priv.Bytes())
......
......@@ -74,30 +74,35 @@ minerExecs=["ticket", "autonomy"]
[mver.consensus]
fundKeyAddr = "1BQXS6TxaYYG5mADaWij4AxhZZUTpw95a5"
powLimitBits = "0x1f00ffff"
maxTxNumber = 1600
[mver.consensus.ForkChainParamV1]
maxTxNumber = 10000
[mver.consensus.ForkChainParamV2]
powLimitBits = "0x1f2fffff"
[mver.consensus.ForkTicketFundAddrV1]
fundKeyAddr = "1Ji3W12KGScCM7C2p8bg635sNkayDM8MGY"
[mver.consensus.ticket]
coinReward = 18
coinDevFund = 12
ticketPrice = 10000
powLimitBits = "0x1f00ffff"
retargetAdjustmentFactor = 4
futureBlockTime = 16
ticketFrozenTime = 5
ticketWithdrawTime = 10
ticketMinerWaitTime = 2
maxTxNumber = 1600
targetTimespan = 2304
targetTimePerBlock = 16
[mver.consensus.ForkChainParamV1]
maxTxNumber = 10000
[mver.consensus.ticket.ForkChainParamV1]
targetTimespan = 288 #only for test
targetTimePerBlock = 2
[mver.consensus.ForkChainParamV2]
powLimitBits = "0x1f2fffff"
[mver.consensus.ForkTicketFundAddrV1]
fundKeyAddr = "1Ji3W12KGScCM7C2p8bg635sNkayDM8MGY"
[consensus.sub.solo]
genesis="14KEKbYtKKQm4wMthSK9J4La4nAiidGozt"
genesisBlockTime=1514533394
......
......@@ -125,7 +125,7 @@ func createTicket(minerAddr, returnAddr string, count int32, height int64) (ret
tx1.To = minerAddr
//gen payload
g := &cty.CoinsAction_Genesis{}
g.Genesis = &types.AssetsGenesis{Amount: types.GetP(height).TicketPrice}
g.Genesis = &types.AssetsGenesis{Amount: ty.GetTicketMinerParam(height).TicketPrice}
tx1.Payload = types.Encode(&cty.CoinsAction{Value: g, Ty: cty.CoinsActionGenesis})
ret = append(ret, &tx1)
......@@ -135,7 +135,7 @@ func createTicket(minerAddr, returnAddr string, count int32, height int64) (ret
tx2.To = driver.ExecAddress("ticket")
//gen payload
g = &cty.CoinsAction_Genesis{}
g.Genesis = &types.AssetsGenesis{Amount: int64(count) * types.GetP(height).TicketPrice, ReturnAddress: returnAddr}
g.Genesis = &types.AssetsGenesis{Amount: int64(count) * ty.GetTicketMinerParam(height).TicketPrice, ReturnAddress: returnAddr}
tx2.Payload = types.Encode(&cty.CoinsAction{Value: g, Ty: cty.CoinsActionGenesis})
ret = append(ret, &tx2)
......@@ -307,7 +307,7 @@ func (client *Client) getModify(beg, end int64) ([]byte, error) {
// CheckBlock implements the ticket checkblock func
func (client *Client) CheckBlock(parent *types.Block, current *types.BlockDetail) error {
cfg := types.GetP(current.Block.Height)
cfg := ty.GetTicketMinerParam(current.Block.Height)
if current.Block.BlockTime-types.Now().Unix() > cfg.FutureBlockTime {
return types.ErrFutureBlock
}
......@@ -444,7 +444,8 @@ func (client *Client) getNextRequiredDifficulty(block *types.Block, bits uint32)
if block == nil {
return types.GetP(0).PowLimitBits, defaultModify, nil
}
cfg := types.GetP(block.Height)
powLimitBits := types.GetP(block.Height).PowLimitBits
cfg := ty.GetTicketMinerParam(block.Height)
blocksPerRetarget := int64(cfg.TargetTimespan / cfg.TargetTimePerBlock)
// Return the previous block's difficulty requirements if this block
// is not at a difficulty retarget interval.
......@@ -462,15 +463,15 @@ func (client *Client) getNextRequiredDifficulty(block *types.Block, bits uint32)
// worth of blocks).
firstBlock, err := client.RequestBlock(block.Height + 1 - blocksPerRetarget)
if err != nil {
return cfg.PowLimitBits, defaultModify, err
return powLimitBits, defaultModify, err
}
if firstBlock == nil {
return cfg.PowLimitBits, defaultModify, types.ErrBlockNotFound
return powLimitBits, defaultModify, types.ErrBlockNotFound
}
modify, err := client.getModify(block.Height+1-blocksPerRetarget, block.Height)
if err != nil {
return cfg.PowLimitBits, defaultModify, err
return powLimitBits, defaultModify, err
}
// Limit the amount of adjustment that can occur to the previous
// difficulty.
......@@ -496,7 +497,7 @@ func (client *Client) getNextRequiredDifficulty(block *types.Block, bits uint32)
newTarget.Div(newTarget, big.NewInt(targetTimespan))
// Limit new value to the proof of work limit.
powLimit := difficulty.CompactToBig(cfg.PowLimitBits)
powLimit := difficulty.CompactToBig(powLimitBits)
if newTarget.Cmp(powLimit) > 0 {
newTarget.Set(powLimit)
}
......@@ -542,7 +543,7 @@ func (client *Client) searchTargetTicket(parent, block *types.Block) (*ty.Ticket
continue
}
//the ticket has reached maturity
if !ticket.GetIsGenesis() && (block.BlockTime-ticket.GetCreateTime() <= types.GetP(block.Height).TicketFrozenTime) {
if !ticket.GetIsGenesis() && (block.BlockTime-ticket.GetCreateTime() <= ty.GetTicketMinerParam(block.Height).TicketFrozenTime) {
continue
}
// look up the private key
......@@ -638,7 +639,7 @@ func (client *Client) addMinerTx(parent, block *types.Block, diff *big.Int, priv
miner.TicketId = tid
miner.Bits = difficulty.BigToCompact(diff)
miner.Modify = modify
miner.Reward = types.GetP(block.Height).CoinReward + fee
miner.Reward = ty.GetTicketMinerParam(block.Height).CoinReward + fee
privHash, err := genPrivHash(priv, tid)
if err != nil {
return err
......
......@@ -166,6 +166,28 @@ func (a *action) votePropChange(voteProb *auty.VoteProposalChange) (*types.Recei
voteProb.ProposalID, "err", err)
return nil, err
}
// verify active board membership
mpBd := make(map[string]struct{})
for _, b := range cur.Board.Boards {
mpBd[b] = struct{}{}
}
for _, ch := range cur.PropChange.Changes {
if ch.Cancel {
mpBd[ch.Addr] = struct{}{}
} else {
if _, ok := mpBd[ch.Addr]; ok {
delete(mpBd, ch.Addr)
}
}
}
if _, ok := mpBd[a.fromaddr]; !ok {
err = auty.ErrNoActiveBoard
alog.Error("votePropChange ", "addr", a.fromaddr, "this addr is not active board member",
voteProb.ProposalID, "err", err)
return nil, err
}
// update the vote record
votes.Address = append(votes.Address, a.fromaddr)
......
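A minimal, self-contained sketch of the board-membership check added above: start from the active boards, apply each proposed change (Cancel=true puts the address into the active set, otherwise it is removed), then require the voter to still be in the resulting set. All names and addresses here are illustrative only.
package main
import "fmt"
func main() {
	boards := []string{"addrA", "addrB", "addrC"}
	changes := []struct {
		Addr   string
		Cancel bool
	}{
		{Addr: "addrA", Cancel: false}, // drops addrA from the active set
		{Addr: "addrX", Cancel: true},  // puts addrX into the active set
	}
	active := make(map[string]struct{})
	for _, b := range boards {
		active[b] = struct{}{}
	}
	for _, ch := range changes {
		if ch.Cancel {
			active[ch.Addr] = struct{}{}
		} else {
			delete(active, ch.Addr)
		}
	}
	_, ok := active["addrA"]
	fmt.Println("addrA may still vote:", ok) // false
}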
......@@ -19,9 +19,17 @@ import (
"github.com/stretchr/testify/mock"
)
var (
Addr18 = "1PUiGcbsccfxW3zuvHXZBJfznziph5miAo"
PrivKey18 = "0x56942AD84CCF4788ED6DACBC005A1D0C4F91B63BCF0C99A02BE03C8DEAE71138"
Addr19 = "1EDnnePAZN48aC2hiTDzhkczfF39g1pZZX"
PrivKey19 = "0x2116459C0EC8ED01AA0EEAE35CAC5C96F94473F7816F114873291217303F6989"
)
func InitChange(t *testing.T, stateDB dbm.KV) {
act := &auty.ActiveBoard{
Boards: boards,
Boards: boards,
Revboards: []string{Addr18},
}
err := stateDB.Set(activeBoardID(), types.Encode(act))
assert.NoError(t, err)
......@@ -45,6 +53,15 @@ func TestVoteProposalChange(t *testing.T) {
voteProposalChange(t, env, exec, stateDB, kvdb, true)
}
func TestErrorVoteProposalChange(t *testing.T) {
env, exec, stateDB, kvdb := InitEnv()
InitChange(t, stateDB)
// PropChange
testPropChange(t, env, exec, stateDB, kvdb, true)
//voteProposalChange
voteErrorProposalChange(t, env, exec, stateDB, kvdb, true)
}
func TestTerminateProposalChange(t *testing.T) {
env, exec, stateDB, kvdb := InitEnv()
InitChange(t, stateDB)
......@@ -59,7 +76,7 @@ func testPropChange(t *testing.T, env *ExecEnv, exec drivers.Driver, stateDB dbm
Year: 2019,
Month: 7,
Day: 10,
Changes: []*auty.Change{{Cancel: true, Addr: AddrA}},
Changes: []*auty.Change{{Cancel: true, Addr: AddrA}, {Cancel: false, Addr: Addr18}},
StartBlockHeight: env.blockHeight + 5,
EndBlockHeight: env.blockHeight + startEndBlockPeriod + 10,
}
......@@ -281,7 +298,134 @@ func voteProposalChange(t *testing.T, env *ExecEnv, exec drivers.Driver, stateDB
err = types.Decode(value, act)
assert.NoError(t, err)
assert.Equal(t, act.Revboards[0], AddrA)
assert.Equal(t, len(act.Boards), len(boards)-1)
assert.Equal(t, len(act.Boards), len(boards))
}
func voteErrorProposalChange(t *testing.T, env *ExecEnv, exec drivers.Driver, stateDB dbm.KV, kvdb dbm.KVDB, save bool) {
api := new(apimock.QueueProtocolAPI)
api.On("StoreList", mock.Anything).Return(&types.StoreListReply{}, nil)
api.On("GetLastHeader", mock.Anything).Return(&types.Header{StateHash: []byte("")}, nil)
hear := &types.Header{StateHash: []byte("")}
api.On("GetHeaders", mock.Anything).
Return(&types.Headers{
Items: []*types.Header{hear}}, nil)
acc := &types.Account{
Currency: 0,
Balance: total * 4,
}
val := types.Encode(acc)
values := [][]byte{val}
api.On("StoreGet", mock.Anything).Return(&types.StoreReplyValue{Values: values}, nil).Once()
acc = &types.Account{
Currency: 0,
Frozen: total,
}
val1 := types.Encode(acc)
values1 := [][]byte{val1}
api.On("StoreGet", mock.Anything).Return(&types.StoreReplyValue{Values: values1}, nil).Once()
exec.SetAPI(api)
proposalID := env.txHash
type record struct {
priv string
appr bool
}
records := []record{
{PrivKey18, false},
{PrivKey19, false},
{PrivKeyA, false},
{PrivKeyB, true},
{PrivKeyC, true},
{PrivKeyD, true},
{PrivKey1, false},
{PrivKey2, false},
{PrivKey3, false},
{PrivKey4, false},
{PrivKey5, true},
{PrivKey6, true},
{PrivKey7, true},
{PrivKey8, true},
{PrivKey9, true},
{PrivKey10, true},
{PrivKey11, true},
{PrivKey12, true},
}
for i, record := range records {
opt := &auty.VoteProposalChange{
ProposalID: proposalID,
Approve: record.appr,
}
tx, err := voteProposalChangeTx(opt)
assert.NoError(t, err)
tx, err = signTx(tx, record.priv)
assert.NoError(t, err)
// set the current height to the voting height
exec.SetEnv(env.startHeight, env.blockTime, env.difficulty)
receipt, err := exec.Exec(tx, int(1))
if i < 2 {
assert.Equal(t, err, auty.ErrNoActiveBoard)
} else {
assert.NoError(t, err)
assert.NotNil(t, receipt)
if save {
for _, kv := range receipt.KV {
stateDB.Set(kv.Key, kv.Value)
}
}
receiptData := &types.ReceiptData{Ty: receipt.Ty, Logs: receipt.Logs}
set, err := exec.ExecLocal(tx, receiptData, int(1))
assert.NoError(t, err)
assert.NotNil(t, set)
if save {
for _, kv := range set.KV {
kvdb.Set(kv.Key, kv.Value)
}
}
// del
set, err = exec.ExecDelLocal(tx, receiptData, int(1))
assert.NoError(t, err)
assert.NotNil(t, set)
// must be reset on every iteration
acc := &types.Account{
Currency: 0,
Frozen: total,
}
val := types.Encode(acc)
values := [][]byte{val}
api.On("StoreGet", mock.Anything).Return(&types.StoreReplyValue{Values: values}, nil).Once()
exec.SetAPI(api)
}
}
// check
// balance
accCoin := account.NewCoinsAccount()
accCoin.SetDB(stateDB)
account := accCoin.LoadExecAccount(AddrA, autonomyAddr)
assert.Equal(t, int64(0), account.Frozen)
account = accCoin.LoadExecAccount(autonomyAddr, autonomyAddr)
assert.Equal(t, proposalAmount, account.Balance)
// status
value, err := stateDB.Get(propChangeID(proposalID))
assert.NoError(t, err)
cur := &auty.AutonomyProposalChange{}
err = types.Decode(value, cur)
assert.NoError(t, err)
assert.Equal(t, int32(auty.AutonomyStatusTmintPropChange), cur.Status)
assert.Equal(t, AddrA, cur.Address)
assert.Equal(t, true, cur.VoteResult.Pass)
value, err = stateDB.Get(activeBoardID())
assert.NoError(t, err)
act := &auty.ActiveBoard{}
err = types.Decode(value, act)
assert.NoError(t, err)
assert.Equal(t, act.Revboards[0], AddrA)
assert.Equal(t, len(act.Boards), len(boards))
}
func voteProposalChangeTx(parm *auty.VoteProposalChange) (*types.Transaction, error) {
......
......@@ -23,13 +23,19 @@ func (a *action) propProject(prob *auty.ProposalProject) (*types.Receipt, error)
return nil, types.ErrInvalidAddress
}
if prob.StartBlockHeight < a.height || prob.EndBlockHeight < a.height || prob.Amount <= 0 ||
if prob.StartBlockHeight < a.height || prob.EndBlockHeight < a.height ||
prob.StartBlockHeight+startEndBlockPeriod > prob.EndBlockHeight {
alog.Error("propProject height or amount invaild", "StartBlockHeight", prob.StartBlockHeight, "EndBlockHeight",
prob.EndBlockHeight, "height", a.height, "amount", prob.Amount)
alog.Error("propProject height invaild", "StartBlockHeight", prob.StartBlockHeight, "EndBlockHeight",
prob.EndBlockHeight, "height", a.height)
return nil, auty.ErrSetBlockHeight
}
if prob.Amount <= 0 {
err := types.ErrInvalidParam
alog.Error("propProject amount invaild", "amount", prob.Amount, "error", err)
return nil, err
}
// get the board members
pboard, err := a.getActiveBoard()
if err != nil {
......
......@@ -93,7 +93,7 @@ func TestPropProject(t *testing.T) {
result := []error{
types.ErrInvalidAddress,
auty.ErrSetBlockHeight,
types.ErrInvalidParam,
auty.ErrSetBlockHeight,
types.ErrNotFound,
auty.ErrNoPeriodAmount,
......
all:
chmod +x ./build.sh
./build.sh $(OUT) $(FLAG)
\ No newline at end of file
#!/usr/bin/env bash
strpwd=$(pwd)
strcmd=${strpwd##*dapp/}
strapp=${strcmd%/cmd*}
OUT_DIR="${1}/$strapp"
#FLAG=$2
mkdir -p "${OUT_DIR}"
cp ./build/* "${OUT_DIR}"
OUT_TESTDIR="${1}/dapptest/$strapp"
mkdir -p "${OUT_TESTDIR}"
chmod +x ./build/test-rpc.sh
cp ./build/test-rpc.sh "${OUT_TESTDIR}"
#!/usr/bin/env bash
# shellcheck disable=SC2128
set -e
set -o pipefail
MAIN_HTTP=""
# shellcheck source=/dev/null
source ../dapp-test-common.sh
MAIN_HTTP=""
CASE_ERR=""
#eventId=""
#txhash=""
#color
RED='\033[1;31m'
GRE='\033[1;32m'
NOC='\033[0m'
init() {
ispara=$(echo '"'"${MAIN_HTTP}"'"' | jq '.|contains("8901")')
echo "ipara=$ispara"
}
function run_test() {
echo "run_test"
}
function main() {
MAIN_HTTP="$1"
echo "main_ip=$MAIN_HTTP"
init
echo "=========== # dposvote rpc test start============="
run_test
if [ -n "$CASE_ERR" ]; then
echo -e "${RED}=============Dposvote Rpc Test Fail=============${NOC}"
exit 1
else
echo -e "${GRE}=============Dposvote Rpc Test Pass==============${NOC}"
fi
echo "=========== # dposvote rpc test end============="
}
main "$1"
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package executor
/*
This contract works with the DPos consensus to provide (1) registration, deregistration, voting and query management for candidate nodes, and (2) staged publication and management of the VRF information of the TopN delegate nodes (N is the agreed number of delegates) while the DPos consensus is running.
Main flow:
(1) When the system first starts, a default set of delegate nodes runs the consensus.
(2) Once the system is running, delegates can be re-elected; each candidate stakes 10000 coins (not yet implemented) to register as a candidate node.
(3) Candidates can campaign in the community so that users vote for them.
(4) Users vote for the candidates they support. After voting, the funds are frozen and the vote can only be cancelled after 3 days.
(5) At fixed block heights (for example every 100000 blocks), the current voting data is re-read and the delegate nodes for the next period are determined.
(6) Delegate nodes produce blocks under the consensus. In each cycle (within one cycle the delegates take turns producing blocks until every one has had a turn), VRF information is published in two phases:
    phase 1: each delegate publishes the M part of its VRF;
    phase 2: each delegate publishes the R and P parts of its VRF.
    The published M, R and P values can be verified.
(7) In the new cycle, the VRF information above is used to reshuffle the block-producing order of the delegates, and blocks are produced in the shuffled order.
*/
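As a rough sketch of the cycle timing referred to in step (6) above, assuming the default configuration declared in executor.go below (dposDelegateNum=3, dposBlockInterval=3s, dposContinueBlockNum=6), one cycle spans 3*3*6 = 54 seconds, and a cycle's boundaries follow from a timestamp the same way calcCycleByTime computes them:
package main
import "fmt"
func main() {
	const dposCycle = 3 * 3 * 6 // delegateNum * blockInterval * continueBlockNum, in seconds
	now := int64(1000) // any unix-style timestamp, in seconds
	cycle := now / dposCycle                // 18: which cycle this moment falls into
	cycleStart := now - now%dposCycle       // 972: first second of the cycle
	cycleStop := cycleStart + dposCycle - 1 // 1025: last second of the cycle
	fmt.Println(cycle, cycleStart, cycleStop)
}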
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package executor
import (
log "github.com/33cn/chain33/common/log/log15"
drivers "github.com/33cn/chain33/system/dapp"
"github.com/33cn/chain33/types"
dty "github.com/33cn/plugin/plugin/dapp/dposvote/types"
)
var logger = log.New("module", "execs.dposvote")
var driverName = dty.DPosX
var (
dposDelegateNum int64 = 3 //number of delegate nodes; read from config, may later be determined by vote results
dposBlockInterval int64 = 3 //block interval, currently 3s
dposContinueBlockNum int64 = 6 //number of blocks a delegate produces in a row once elected
dposCycle = dposDelegateNum * dposBlockInterval * dposContinueBlockNum
dposPeriod = dposBlockInterval * dposContinueBlockNum
blockNumToUpdateDelegate int64 = 20000
registTopNHeightLimit int64 = 100
updateTopNHeightLimit int64 = 200
)
// CycleInfo indicates the start and stop of a cycle
type CycleInfo struct {
cycle int64
cycleStart int64
cycleStop int64
}
func calcCycleByTime(now int64) *CycleInfo {
cycle := now / dposCycle
cycleStart := now - now%dposCycle
cycleStop := cycleStart + dposCycle - 1
return &CycleInfo{
cycle: cycle,
cycleStart: cycleStart,
cycleStop: cycleStop,
}
}
func calcTopNVersion(height int64) (version, left int64) {
return height / blockNumToUpdateDelegate, height % blockNumToUpdateDelegate
}
func init() {
ety := types.LoadExecutorType(driverName)
ety.InitFuncList(types.ListMethod(&DPos{}))
}
// Init DPos Executor
func Init(name string, sub []byte) {
driverName := GetName()
if name != driverName {
panic("system dapp can't be rename")
}
drivers.Register(driverName, newDposVote, types.GetDappFork(driverName, "Enable"))
//read the config items so the cycle is computed consistently with the consensus module
dposDelegateNum = types.Conf("config.consensus.sub.dpos").GInt("delegateNum")
dposBlockInterval = types.Conf("config.consensus.sub.dpos").GInt("blockInterval")
dposContinueBlockNum = types.Conf("config.consensus.sub.dpos").GInt("continueBlockNum")
blockNumToUpdateDelegate = types.Conf("config.consensus.sub.dpos").GInt("blockNumToUpdateDelegate")
registTopNHeightLimit = types.Conf("config.consensus.sub.dpos").GInt("registTopNHeightLimit")
updateTopNHeightLimit = types.Conf("config.consensus.sub.dpos").GInt("updateTopNHeightLimit")
dposCycle = dposDelegateNum * dposBlockInterval * dposContinueBlockNum
dposPeriod = dposBlockInterval * dposContinueBlockNum
}
//DPos is the executor for DPos candidate registration, voting, and VRF information registration/management
type DPos struct {
drivers.DriverBase
}
func newDposVote() drivers.Driver {
t := &DPos{}
t.SetChild(t)
t.SetExecutorType(types.LoadExecutorType(driverName))
return t
}
//GetName returns the name of the DPos executor
func GetName() string {
return newDposVote().GetName()
}
//ExecutorOrder makes ExecLocal run at the same time as Exec
func (g *DPos) ExecutorOrder() int64 {
return drivers.ExecLocalSameTime
}
//GetDriverName returns the name of the DPos executor
func (g *DPos) GetDriverName() string {
return dty.DPosX
}
// CheckReceiptExecOk return true to check if receipt ty is ok
func (g *DPos) CheckReceiptExecOk() bool {
return true
}
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package executor
import (
"github.com/33cn/chain33/types"
dty "github.com/33cn/plugin/plugin/dapp/dposvote/types"
)
//Exec_Regist registers a candidate node
func (d *DPos) Exec_Regist(payload *dty.DposCandidatorRegist, tx *types.Transaction, index int) (*types.Receipt, error) {
action := NewAction(d, tx, index)
return action.Regist(payload)
}
//Exec_CancelRegist cancels a candidate node's registration
func (d *DPos) Exec_CancelRegist(payload *dty.DposCandidatorCancelRegist, tx *types.Transaction, index int) (*types.Receipt, error) {
action := NewAction(d, tx, index)
return action.CancelRegist(payload)
}
//Exec_ReRegist re-registers a candidate node
func (d *DPos) Exec_ReRegist(payload *dty.DposCandidatorRegist, tx *types.Transaction, index int) (*types.Receipt, error) {
action := NewAction(d, tx, index)
return action.ReRegist(payload)
}
//Exec_Vote votes for a candidate node
func (d *DPos) Exec_Vote(payload *dty.DposVote, tx *types.Transaction, index int) (*types.Receipt, error) {
action := NewAction(d, tx, index)
return action.Vote(payload)
}
//Exec_CancelVote cancels a vote for a candidate node
func (d *DPos) Exec_CancelVote(payload *dty.DposCancelVote, tx *types.Transaction, index int) (*types.Receipt, error) {
action := NewAction(d, tx, index)
return action.CancelVote(payload)
}
//Exec_RegistVrfM registers a delegate node's VRF M information
func (d *DPos) Exec_RegistVrfM(payload *dty.DposVrfMRegist, tx *types.Transaction, index int) (*types.Receipt, error) {
action := NewAction(d, tx, index)
return action.RegistVrfM(payload)
}
//Exec_RegistVrfRP registers a delegate node's VRF R/P information
func (d *DPos) Exec_RegistVrfRP(payload *dty.DposVrfRPRegist, tx *types.Transaction, index int) (*types.Receipt, error) {
action := NewAction(d, tx, index)
return action.RegistVrfRP(payload)
}
//Exec_RecordCB records CycleBoundary information
func (d *DPos) Exec_RecordCB(payload *dty.DposCBInfo, tx *types.Transaction, index int) (*types.Receipt, error) {
action := NewAction(d, tx, index)
return action.RecordCB(payload)
}
//Exec_RegistTopN registers the TopN information for a given cycle
func (d *DPos) Exec_RegistTopN(payload *dty.TopNCandidatorRegist, tx *types.Transaction, index int) (*types.Receipt, error) {
action := NewAction(d, tx, index)
return action.RegistTopN(payload)
}
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package executor
import (
"bytes"
"fmt"
"github.com/33cn/chain33/types"
dty "github.com/33cn/plugin/plugin/dapp/dposvote/types"
)
func (d *DPos) rollbackCand(cand *dty.CandidatorInfo, log *dty.ReceiptCandicator) {
if cand == nil || log == nil {
return
}
//if the status changed, restore the previous status
if log.StatusChange {
cand.Status = log.PreStatus
cand.Index = cand.PreIndex
}
//if a vote was cast, roll the vote back
if log.VoteType == dty.VoteTypeVote {
for i := 0; i < len(cand.Voters); i++ {
if cand.Voters[i].Index == log.Vote.Index && cand.Voters[i].FromAddr == log.Vote.FromAddr && bytes.Equal(cand.Voters[i].Pubkey, log.Vote.Pubkey) {
cand.Voters = append(cand.Voters[0:i], cand.Voters[i+1:]...)
break
}
}
} else if log.VoteType == dty.VoteTypeCancelVote {
cand.Voters = append(cand.Voters, log.Vote)
}
}
func (d *DPos) rollbackCandVote(log *dty.ReceiptCandicator) (kvs []*types.KeyValue, err error) {
voterTable := dty.NewDposVoteTable(d.GetLocalDB())
candTable := dty.NewDposCandidatorTable(d.GetLocalDB())
if err != nil {
return nil, err
}
if log.Status == dty.CandidatorStatusRegist {
//registration rollback: delete the record from the candidate table
err = candTable.Del(log.Pubkey)
if err != nil {
return nil, err
}
kvs, err = candTable.Save()
return kvs, err
} else if log.Status == dty.CandidatorStatusVoted {
//vote-stage rollback: roll back the status and the vote
candInfo := log.CandInfo
log.CandInfo = nil
//roll back the candidate info first
d.rollbackCand(candInfo, log)
err = candTable.Replace(candInfo)
if err != nil {
return nil, err
}
kvs1, err := candTable.Save()
if err != nil {
return nil, err
}
if log.VoteType == dty.VoteTypeVote {
err = voterTable.Del([]byte(fmt.Sprintf("%018d", log.Index)))
if err != nil {
return nil, err
}
} else if log.VoteType == dty.VoteTypeCancelVote {
err = voterTable.Add(log.Vote)
if err != nil {
return nil, err
}
}
kvs2, err := voterTable.Save()
if err != nil {
return nil, err
}
kvs = append(kvs1, kvs2...)
} else if log.Status == dty.CandidatorStatusCancelRegist {
//cancel-registration rollback: restore the cancelled votes
candInfo := log.CandInfo
log.CandInfo = nil
//roll back the candidate info first
d.rollbackCand(candInfo, log)
err = candTable.Replace(candInfo)
if err != nil {
return nil, err
}
kvs1, err := candTable.Save()
if err != nil {
return nil, err
}
if log.VoteType == dty.VoteTypeCancelAllVote {
for i := 0; i < len(candInfo.Voters); i++ {
err = voterTable.Add(candInfo.Voters[i])
if err != nil {
return nil, err
}
}
}
kvs2, err := voterTable.Save()
if err != nil {
return nil, err
}
kvs = append(kvs1, kvs2...)
} else if log.Status == dty.CandidatorStatusReRegist {
//re-registration rollback: delete the record from the candidate table
err = candTable.Del(log.Pubkey)
if err != nil {
return nil, err
}
kvs, err = candTable.Save()
return kvs, err
}
return kvs, nil
}
func (d *DPos) rollbackVrf(log *dty.ReceiptVrf) (kvs []*types.KeyValue, err error) {
if log.Status == dty.VrfStatusMRegist {
vrfMTable := dty.NewDposVrfMTable(d.GetLocalDB())
//VRF M registration rollback: delete the record from the vrfM table
err = vrfMTable.Del([]byte(fmt.Sprintf("%018d", log.Index)))
if err != nil {
return nil, err
}
kvs, err = vrfMTable.Save()
return kvs, err
} else if log.Status == dty.VrfStatusRPRegist {
VrfRPTable := dty.NewDposVrfRPTable(d.GetLocalDB())
err = VrfRPTable.Del([]byte(fmt.Sprintf("%018d", log.Index)))
if err != nil {
return nil, err
}
kvs, err = VrfRPTable.Save()
return kvs, err
}
return nil, nil
}
func (d *DPos) rollbackCBInfo(log *dty.ReceiptCB) (kvs []*types.KeyValue, err error) {
if log.Status == dty.CBStatusRecord {
cbTable := dty.NewDposCBTable(d.GetLocalDB())
//CB record rollback: delete the record from the cb table
err = cbTable.Del([]byte(fmt.Sprintf("%018d", log.Cycle)))
if err != nil {
return nil, err
}
kvs, err = cbTable.Save()
return kvs, err
}
return nil, nil
}
func (d *DPos) execDelLocal(receipt *types.ReceiptData) (*types.LocalDBSet, error) {
dbSet := &types.LocalDBSet{}
if receipt.GetTy() != types.ExecOk {
return dbSet, nil
}
for _, log := range receipt.Logs {
switch log.GetTy() {
case dty.TyLogCandicatorRegist, dty.TyLogCandicatorVoted, dty.TyLogCandicatorCancelVoted, dty.TyLogCandicatorCancelRegist, dty.TyLogCandicatorReRegist:
receiptLog := &dty.ReceiptCandicator{}
if err := types.Decode(log.Log, receiptLog); err != nil {
return nil, err
}
kv, err := d.rollbackCandVote(receiptLog)
if err != nil {
return nil, err
}
dbSet.KV = append(dbSet.KV, kv...)
case dty.TyLogVrfMRegist, dty.TyLogVrfRPRegist:
receiptLog := &dty.ReceiptVrf{}
if err := types.Decode(log.Log, receiptLog); err != nil {
return nil, err
}
kv, err := d.rollbackVrf(receiptLog)
if err != nil {
return nil, err
}
dbSet.KV = append(dbSet.KV, kv...)
case dty.TyLogCBInfoRecord:
receiptLog := &dty.ReceiptCB{}
if err := types.Decode(log.Log, receiptLog); err != nil {
return nil, err
}
kv, err := d.rollbackCBInfo(receiptLog)
if err != nil {
return nil, err
}
dbSet.KV = append(dbSet.KV, kv...)
case dty.TyLogTopNCandidatorRegist:
//do nothing now
}
}
return dbSet, nil
}
//ExecDelLocal_Regist method
func (d *DPos) ExecDelLocal_Regist(payload *dty.DposCandidatorRegist, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execDelLocal(receiptData)
}
//ExecDelLocal_CancelRegist method
func (d *DPos) ExecDelLocal_CancelRegist(payload *dty.DposCandidatorCancelRegist, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execDelLocal(receiptData)
}
//ExecDelLocal_ReRegist method
func (d *DPos) ExecDelLocal_ReRegist(payload *dty.DposCandidatorRegist, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execDelLocal(receiptData)
}
//ExecDelLocal_Vote method
func (d *DPos) ExecDelLocal_Vote(payload *dty.DposVote, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execDelLocal(receiptData)
}
//ExecDelLocal_CancelVote method
func (d *DPos) ExecDelLocal_CancelVote(payload *dty.DposCancelVote, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execDelLocal(receiptData)
}
//ExecDelLocal_VrfMRegist method
func (d *DPos) ExecDelLocal_VrfMRegist(payload *dty.DposVrfMRegist, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execDelLocal(receiptData)
}
//ExecDelLocal_VrfRPRegist method
func (d *DPos) ExecDelLocal_VrfRPRegist(payload *dty.DposVrfRPRegist, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execDelLocal(receiptData)
}
//ExecDelLocal_RecordCB method
func (d *DPos) ExecDelLocal_RecordCB(payload *dty.DposCBInfo, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execDelLocal(receiptData)
}
//ExecDelLocal_RegistTopN method
func (d *DPos) ExecDelLocal_RegistTopN(payload *dty.TopNCandidatorRegist, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execDelLocal(receiptData)
}
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package executor
import (
"fmt"
"github.com/33cn/chain33/types"
dty "github.com/33cn/plugin/plugin/dapp/dposvote/types"
)
func (d *DPos) updateCandVote(log *dty.ReceiptCandicator) (kvs []*types.KeyValue, err error) {
voteTable := dty.NewDposVoteTable(d.GetLocalDB())
canTable := dty.NewDposCandidatorTable(d.GetLocalDB())
candInfo := log.CandInfo
log.CandInfo = nil
if log.Status == dty.CandidatorStatusRegist {
err = canTable.Add(candInfo)
if err != nil {
return nil, err
}
kvs, err = canTable.Save()
if err != nil {
return nil, err
}
} else if log.Status == dty.CandidatorStatusVoted {
voter := log.Vote
err = canTable.Replace(candInfo)
if err != nil {
return nil, err
}
kvs1, err := canTable.Save()
if err != nil {
return nil, err
}
if log.VoteType == dty.VoteTypeVote {
err = voteTable.Add(voter)
if err != nil {
return nil, err
}
} else if log.VoteType == dty.VoteTypeCancelVote {
err = voteTable.Del([]byte(fmt.Sprintf("%018d", voter.Index)))
if err != nil {
return nil, err
}
}
kvs2, err := voteTable.Save()
if err != nil {
return nil, err
}
kvs = append(kvs1, kvs2...)
} else if log.Status == dty.CandidatorStatusReRegist {
err = canTable.Replace(candInfo)
if err != nil {
return nil, err
}
kvs, err = canTable.Save()
if err != nil {
return nil, err
}
} else if log.Status == dty.CandidatorStatusCancelRegist {
err = canTable.Replace(candInfo)
if err != nil {
return nil, err
}
kvs1, err := canTable.Save()
if err != nil {
return nil, err
}
for i := 0; i < len(candInfo.Voters); i++ {
err = voteTable.Del([]byte(fmt.Sprintf("%018d", candInfo.Voters[i].Index)))
if err != nil {
return nil, err
}
}
kvs2, err := voteTable.Save()
if err != nil {
return nil, err
}
kvs = append(kvs1, kvs2...)
}
return kvs, nil
}
func (d *DPos) updateVrf(log *dty.ReceiptVrf) (kvs []*types.KeyValue, err error) {
if log.Status == dty.VrfStatusMRegist {
vrfMTable := dty.NewDposVrfMTable(d.GetLocalDB())
vrfM := &dty.DposVrfM{
Index: log.Index,
Pubkey: log.Pubkey,
Cycle: log.Cycle,
Height: log.Height,
M: log.M,
Time: log.Time,
CycleStart: log.CycleStart,
CycleMiddle: log.CycleMiddle,
CycleStop: log.CycleStop,
}
err = vrfMTable.Add(vrfM)
if err != nil {
return nil, err
}
kvs, err = vrfMTable.Save()
if err != nil {
return nil, err
}
} else if log.Status == dty.VrfStatusRPRegist {
VrfRPTable := dty.NewDposVrfRPTable(d.GetLocalDB())
vrfRP := &dty.DposVrfRP{
Index: log.Index,
Pubkey: log.Pubkey,
Cycle: log.Cycle,
Height: log.Height,
R: log.R,
P: log.P,
M: log.M,
Time: log.Time,
CycleStart: log.CycleStart,
CycleMiddle: log.CycleMiddle,
CycleStop: log.CycleStop,
}
err = VrfRPTable.Add(vrfRP)
if err != nil {
return nil, err
}
kvs, err = VrfRPTable.Save()
if err != nil {
return nil, err
}
}
return kvs, nil
}
func (d *DPos) updateCB(log *dty.ReceiptCB) (kvs []*types.KeyValue, err error) {
if log.Status == dty.CBStatusRecord {
cbTable := dty.NewDposCBTable(d.GetLocalDB())
err = cbTable.Add(log.CbInfo)
if err != nil {
return nil, err
}
kvs, err = cbTable.Save()
if err != nil {
return nil, err
}
}
return kvs, nil
}
func (d *DPos) execLocal(receipt *types.ReceiptData) (*types.LocalDBSet, error) {
dbSet := &types.LocalDBSet{}
if receipt.GetTy() != types.ExecOk {
return dbSet, nil
}
for _, item := range receipt.Logs {
if item.Ty >= dty.TyLogCandicatorRegist && item.Ty <= dty.TyLogCandicatorReRegist {
var candLog dty.ReceiptCandicator
err := types.Decode(item.Log, &candLog)
if err != nil {
return nil, err
}
kvs, err := d.updateCandVote(&candLog)
if err != nil {
return nil, err
}
dbSet.KV = append(dbSet.KV, kvs...)
} else if item.Ty >= dty.TyLogVrfMRegist && item.Ty <= dty.TyLogVrfRPRegist {
var vrfLog dty.ReceiptVrf
err := types.Decode(item.Log, &vrfLog)
if err != nil {
return nil, err
}
kvs, err := d.updateVrf(&vrfLog)
if err != nil {
return nil, err
}
dbSet.KV = append(dbSet.KV, kvs...)
} else if item.Ty == dty.TyLogCBInfoRecord {
var cbLog dty.ReceiptCB
err := types.Decode(item.Log, &cbLog)
if err != nil {
return nil, err
}
kvs, err := d.updateCB(&cbLog)
if err != nil {
return nil, err
}
dbSet.KV = append(dbSet.KV, kvs...)
} else if item.Ty == dty.TyLogTopNCandidatorRegist {
//do nothing
}
}
return dbSet, nil
}
//ExecLocal_Regist method
func (d *DPos) ExecLocal_Regist(payload *dty.DposCandidatorRegist, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execLocal(receiptData)
}
//ExecLocal_CancelRegist method
func (d *DPos) ExecLocal_CancelRegist(payload *dty.DposCandidatorCancelRegist, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execLocal(receiptData)
}
//ExecLocal_ReRegist method
func (d *DPos) ExecLocal_ReRegist(payload *dty.DposCandidatorRegist, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execLocal(receiptData)
}
//ExecLocal_Vote method
func (d *DPos) ExecLocal_Vote(payload *dty.DposVote, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execLocal(receiptData)
}
//ExecLocal_CancelVote method
func (d *DPos) ExecLocal_CancelVote(payload *dty.DposCancelVote, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execLocal(receiptData)
}
//ExecLocal_RegistVrfM method
func (d *DPos) ExecLocal_RegistVrfM(payload *dty.DposVrfMRegist, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execLocal(receiptData)
}
//ExecLocal_RegistVrfRP method
func (d *DPos) ExecLocal_RegistVrfRP(payload *dty.DposVrfRPRegist, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execLocal(receiptData)
}
//ExecLocal_RecordCB method
func (d *DPos) ExecLocal_RecordCB(payload *dty.DposCBInfo, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execLocal(receiptData)
}
//ExecLocal_RegistTopN method
func (d *DPos) ExecLocal_RegistTopN(payload *dty.TopNCandidatorRegist, tx *types.Transaction, receiptData *types.ReceiptData, index int) (*types.LocalDBSet, error) {
return d.execLocal(receiptData)
}
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package executor

import (
    "github.com/33cn/chain33/types"
    dty "github.com/33cn/plugin/plugin/dapp/dposvote/types"
)

//Query_QueryCandidatorByPubkeys method
func (d *DPos) Query_QueryCandidatorByPubkeys(in *dty.CandidatorQuery) (types.Message, error) {
    return queryCands(d.GetLocalDB(), in)
}

//Query_QueryCandidatorByTopN method
func (d *DPos) Query_QueryCandidatorByTopN(in *dty.CandidatorQuery) (types.Message, error) {
    return queryTopNCands(d.GetLocalDB(), in)
}

//Query_QueryVote method
func (d *DPos) Query_QueryVote(in *dty.DposVoteQuery) (types.Message, error) {
    return queryVote(d.GetLocalDB(), in)
}

//Query_QueryVrfByTime method
func (d *DPos) Query_QueryVrfByTime(in *dty.DposVrfQuery) (types.Message, error) {
    return queryVrfByTime(d.GetLocalDB(), in)
}

//Query_QueryVrfByCycle method
func (d *DPos) Query_QueryVrfByCycle(in *dty.DposVrfQuery) (types.Message, error) {
    return queryVrfByCycle(d.GetLocalDB(), in)
}

//Query_QueryVrfByCycleForTopN method
func (d *DPos) Query_QueryVrfByCycleForTopN(in *dty.DposVrfQuery) (types.Message, error) {
    return queryVrfByCycleForTopN(d.GetLocalDB(), in)
}

//Query_QueryVrfByCycleForPubkeys method
func (d *DPos) Query_QueryVrfByCycleForPubkeys(in *dty.DposVrfQuery) (types.Message, error) {
    return queryVrfByCycleForPubkeys(d.GetLocalDB(), in)
}

//Query_QueryCBInfoByCycle method
func (d *DPos) Query_QueryCBInfoByCycle(in *dty.DposCBQuery) (types.Message, error) {
    return queryCBInfoByCycle(d.GetLocalDB(), in)
}

//Query_QueryCBInfoByHeight method
func (d *DPos) Query_QueryCBInfoByHeight(in *dty.DposCBQuery) (types.Message, error) {
    return queryCBInfoByHeight(d.GetLocalDB(), in)
}

//Query_QueryCBInfoByHash method
func (d *DPos) Query_QueryCBInfoByHash(in *dty.DposCBQuery) (types.Message, error) {
    return queryCBInfoByHash(d.GetLocalDB(), in)
}

//Query_QueryTopNByVersion method
func (d *DPos) Query_QueryTopNByVersion(in *dty.TopNCandidatorsQuery) (types.Message, error) {
    return queryTopNByVersion(d.GetStateDB(), in)
}
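These Query_ handlers are reached by name: by chain33 convention the framework maps an incoming query's funcName to the executor method Query_<funcName>, so a query with funcName "QueryVote" lands in Query_QueryVote above. The standalone sketch below only illustrates that naming pattern with plain reflection; it is not chain33's actual dispatcher.

// Illustration only: resolve a handler by the "Query_" + funcName convention.
package main

import (
    "fmt"
    "reflect"
)

type demoExec struct{}

// Query_QueryVote stands in for one of the real handlers above.
func (demoExec) Query_QueryVote(addr string) string { return "votes for " + addr }

func main() {
    funcName := "QueryVote"
    m := reflect.ValueOf(demoExec{}).MethodByName("Query_" + funcName)
    out := m.Call([]reflect.Value{reflect.ValueOf("addr1")})
    fmt.Println(out[0].String()) // votes for addr1
}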
// Copyright Fuzamei Corp. 2018 All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package dposvote

import (
    "github.com/33cn/chain33/pluginmgr"
    "github.com/33cn/plugin/plugin/dapp/dposvote/commands"
    "github.com/33cn/plugin/plugin/dapp/dposvote/executor"
    "github.com/33cn/plugin/plugin/dapp/dposvote/types"
)

func init() {
    pluginmgr.Register(&pluginmgr.PluginBase{
        Name:     types.DPosX,
        ExecName: executor.GetName(),
        Exec:     executor.Init,
        Cmd:      commands.DPosCmd,
    })
}
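Registering with pluginmgr only takes effect once this package's init() actually runs; in this repo the dapp packages are normally pulled into a node build through blank imports in an aggregation file. A minimal sketch of that wiring, with the file location assumed rather than taken from this diff:

// Assumed aggregation file (e.g. plugin/dapp/init/init.go); the blank import
// exists only for its side effect of running the init() above.
package init

import (
    _ "github.com/33cn/plugin/plugin/dapp/dposvote"
)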
all:
	sh ./create_protobuf.sh
#!/bin/sh
protoc --go_out=plugins=grpc:../types ./*.proto --proto_path=. --proto_path="../../../../vendor/github.com/33cn/chain33/types/proto/"