grpc enable instrumentation (#84)

Alex Sharov 2021-09-21 10:10:59 +07:00 committed by GitHub
parent 47490aa942
commit 13b0978d86
4 changed files with 28 additions and 6 deletions

View File

@@ -15,6 +15,7 @@ import (
     "google.golang.org/grpc/backoff"
     "google.golang.org/grpc/credentials"
     "google.golang.org/grpc/keepalive"
+    "google.golang.org/grpc/reflection"
 )

 func TLS(tlsCACert, tlsCertFile, tlsKeyFile string) (credentials.TransportCredentials, error) {
@@ -60,11 +61,12 @@ func NewServer(rateLimit uint32, creds credentials.TransportCredentials) *grpc.Server {
     //}
     var grpcServer *grpc.Server
+    reflection.Register(grpcServer)
     //cpus := uint32(runtime.GOMAXPROCS(-1))
     opts := []grpc.ServerOption{
         //grpc.NumStreamWorkers(cpus), // reduce amount of goroutines
-        grpc.WriteBufferSize(1024), // reduce buffers to save mem
-        grpc.ReadBufferSize(1024),
+        //grpc.WriteBufferSize(1024), // reduce buffers to save mem
+        //grpc.ReadBufferSize(1024),
         grpc.MaxConcurrentStreams(rateLimit), // to force clients to reduce concurrency level
         // Don't drop the connection; settings according to this comment on GitHub:
         // https://github.com/grpc/grpc-go/issues/3171#issuecomment-552796779
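
For context, registering the gRPC reflection service is what lets generic tools discover and call a server's services without compiled .proto files. A minimal, self-contained sketch of the same pattern — the listen address and the stream cap value here are illustrative, not from the commit:

package main

import (
    "log"
    "net"

    "google.golang.org/grpc"
    "google.golang.org/grpc/reflection"
)

func main() {
    // Illustrative listen address; the commit's server wiring is not shown here.
    lis, err := net.Listen("tcp", "localhost:9090")
    if err != nil {
        log.Fatal(err)
    }
    srv := grpc.NewServer(
        grpc.MaxConcurrentStreams(16), // cap client concurrency, as the diff does
    )
    // The call this commit adds: expose the ServerReflection service.
    reflection.Register(srv)
    if err := srv.Serve(lis); err != nil {
        log.Fatal(err)
    }
}

With the server running, a generic client such as grpcurl can enumerate services via "grpcurl -plaintext localhost:9090 list", which is presumably the kind of instrumentation the commit title refers to.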

View File

@@ -116,7 +116,7 @@ type CoherentCacheConfig struct {
 var DefaultCoherentCacheConfig = CoherentCacheConfig{
     KeepViews:    50,
     NewBlockWait: 50 * time.Millisecond,
-    KeysLimit:    400_000,
+    KeysLimit:    1_000_000,
     MetricsLabel: "default",
     WithStorage:  true,
 }
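
This hunk raises the cache's default KeysLimit from 400_000 to 1_000_000. A hypothetical sketch of how a caller would pick up and tune that default — the import path and the kvcache.New constructor name are assumptions, not shown in this diff:

package main

import "github.com/ledgerwatch/erigon-lib/kv/kvcache"

func main() {
    // Start from the package default, which this commit changes to
    // KeysLimit: 1_000_000, then override per-caller fields.
    cfg := kvcache.DefaultCoherentCacheConfig
    cfg.MetricsLabel = "txpool" // illustrative label, not from the commit
    cache := kvcache.New(cfg)   // assumed constructor name
    _ = cache
}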

View File

@@ -36,6 +36,7 @@ import (
     "google.golang.org/grpc"
     "google.golang.org/grpc/credentials"
     "google.golang.org/grpc/keepalive"
+    "google.golang.org/grpc/reflection"
     "google.golang.org/protobuf/types/known/emptypb"
 )
@@ -290,12 +291,11 @@ func StartGrpc(txPoolServer txpool_proto.TxpoolServer, miningServer txpool_proto.MiningServer,
     //}
     var grpcServer *grpc.Server
+    reflection.Register(grpcServer) // Register reflection service on gRPC server.
     //cpus := uint32(runtime.GOMAXPROCS(-1))
     opts := []grpc.ServerOption{
         //grpc.NumStreamWorkers(cpus), // reduce amount of goroutines
-        grpc.WriteBufferSize(1024), // reduce buffers to save mem
-        grpc.ReadBufferSize(1024),
-        grpc.MaxConcurrentStreams(kv.ReadersLimit - 128), // to force clients to reduce concurrency level
         // Don't drop the connection; settings according to this comment on GitHub:
         // https://github.com/grpc/grpc-go/issues/3171#issuecomment-552796779
         grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
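
The hunk is cut off at the keepalive enforcement policy. A sketch of the kind of settings the linked grpc-go#3171 comment recommends — the concrete durations below are illustrative, since the diff truncates before the real values:

package main

import (
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/keepalive"
)

// serverOptions sketches keepalive settings in the spirit of the linked
// comment: tolerate client pings and keep idle connections alive rather
// than dropping them. Durations are illustrative.
func serverOptions() []grpc.ServerOption {
    return []grpc.ServerOption{
        grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
            MinTime:             10 * time.Second, // min allowed interval between client pings
            PermitWithoutStream: true,             // allow pings even with no active streams
        }),
    }
}

func main() { _ = serverOptions() }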

View File

@@ -331,6 +331,26 @@ func (p *TxPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChangeBatch) error {
         return err
     }
+    for _, changesList := range stateChanges.ChangeBatch {
+        for _, change := range changesList.Changes {
+            switch change.Action {
+            case remote.Action_UPSERT, remote.Action_UPSERT_CODE:
+                if change.Incarnation > 0 {
+                    continue
+                }
+                addr := gointerfaces.ConvertH160toAddress(change.Address)
+                id, ok := p.senders.id(string(addr[:]))
+                if !ok {
+                    continue
+                }
+                nonce, balance, err := p.senders.info(cache, viewID, coreTx, id)
+                if err != nil {
+                    return err
+                }
+                onSenderChange(id, nonce, balance, p.byNonce, protocolBaseFee, baseFee, p.pending, p.baseFee, p.queued)
+            }
+        }
+    }
     //log.Debug("[txpool] new block", "unwinded", len(unwindTxs.txs), "mined", len(minedTxs.txs), "baseFee", baseFee, "blockHeight", blockHeight)
     if err := addTxs(p.lastSeenBlock.Load(), cache, viewID, coreTx, p.cfg, p.senders, unwindTxs, protocolBaseFee, baseFee, p.pending, p.baseFee, p.queued, p.byNonce, p.byHash, p.addLocked, p.discardLocked); err != nil {
         return err
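
The added block above is the substance of the change: when a new block arrives, the pool walks every state change and refreshes its cached view of each affected sender. A condensed, self-contained restatement of that control flow — all types and callbacks below are simplified stand-ins, not the pool's real API:

package main

// change is a simplified stand-in for remote.StateChange.
type change struct {
    upsert      bool     // true for Action_UPSERT / Action_UPSERT_CODE
    incarnation uint64   // the diff skips changes with a non-zero incarnation
    address     [20]byte // account address of the affected sender
}

// applyChanges mirrors the nested loops in the diff: for each upserted
// account the pool tracks, fetch fresh nonce/balance and notify the queues.
func applyChanges(
    batches [][]change,
    senderID func(addr [20]byte) (id uint64, ok bool),
    info func(id uint64) (nonce, balance uint64, err error),
    onSenderChange func(id, nonce, balance uint64),
) error {
    for _, changes := range batches {
        for _, c := range changes {
            if !c.upsert {
                continue // other actions don't touch sender state here
            }
            if c.incarnation > 0 {
                continue // contract-related change; skipped by the diff
            }
            id, ok := senderID(c.address)
            if !ok {
                continue // pool holds no transactions from this sender
            }
            nonce, balance, err := info(id)
            if err != nil {
                return err
            }
            onSenderChange(id, nonce, balance)
        }
    }
    return nil
}

func main() {}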