// Mirror of https://gitlab.com/pulsechaincom/prysm-pulse.git
// (synced 2024-12-25, commit 49a0d3caf0).
package main
|
|
|
|
import (
|
|
"context"
|
|
"time"
|
|
|
|
v1 "k8s.io/api/core/v1"
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
"k8s.io/client-go/kubernetes"
|
|
"k8s.io/client-go/rest"
|
|
)
|
|
|
|
var queryInterval = 3 * time.Second
|
|
var namespace = "beacon-chain"
|
|
var podSelector = "component=validator"
|
|
|
|
// watchtower tracks validator pods in the cluster and removes key
// assignments from the database when their pod is no longer alive.
type watchtower struct {
	// db is the assignment store queried and pruned by the watcher.
	db *db
	// client talks to the Kubernetes API via in-cluster config.
	client *kubernetes.Clientset
}
|
|
|
|
func newWatchtower(db *db) *watchtower {
|
|
config, err := rest.InClusterConfig()
|
|
if err != nil {
|
|
panic(err)
|
|
}
|
|
client := kubernetes.NewForConfigOrDie(config)
|
|
return &watchtower{db, client}
|
|
}
|
|
|
|
// WatchPods for termination, update allocations
|
|
func (wt *watchtower) WatchPods() {
|
|
for {
|
|
time.Sleep(queryInterval)
|
|
if err := wt.queryPodsAndUpdateDB(); err != nil {
|
|
log.WithField("error", err).Error("Failed to update pods")
|
|
}
|
|
}
|
|
}
|
|
|
|
// Query k8s pods for existence.
|
|
func (wt *watchtower) queryPodsAndUpdateDB() error {
|
|
ctx := context.Background()
|
|
podNames, err := wt.db.AllocatedPodNames(ctx)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
pList, err := wt.client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: podSelector})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
podExist := make(map[string]bool)
|
|
for _, p := range pList.Items {
|
|
if p.Status.Phase == v1.PodRunning || p.Status.Phase == v1.PodPending {
|
|
podExist[p.Name] = true
|
|
} else {
|
|
log.Debugf("ignoring pod with phase %s", p.Status.Phase)
|
|
}
|
|
}
|
|
|
|
for _, p := range podNames {
|
|
if !podExist[p] {
|
|
log.WithField("pod", p).Debug("Removing assignment from dead pod")
|
|
if err := wt.db.RemovePKAssignment(ctx, p); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|