mirror of
https://github.com/zitadel/zitadel
synced 2024-11-22 00:39:36 +00:00
17953e9040
Even though this is a feature it's released as a fix so that we can backport it to earlier revisions. As reported by multiple users, starting ZITADEL after an upgrade led to downtime and, in the worst case, rollbacks to the previously deployed version. The problem arises when there are too many events to process after ZITADEL starts. The root cause is changes to projections (database tables) which must be recomputed. This PR solves the problem by adding a new step to the setup phase which prefills the projections. The step can be enabled by adding the `--init-projections` flag to `setup`, `start-from-init` and `start-from-setup`. Setting this flag potentially lengthens the setup phase but reduces the risk of the problems mentioned above.
27 lines
649 B
Go
27 lines
649 B
Go
package eventsourcing
|
|
|
|
import (
	"context"
	"fmt"

	admin_handler "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/handler"
	admin_view "github.com/zitadel/zitadel/internal/admin/repository/eventsourcing/view"
	"github.com/zitadel/zitadel/internal/database"
	"github.com/zitadel/zitadel/internal/static"
)
|
|
|
|
// Config holds the configuration for the admin eventsourcing repository.
type Config struct {
	// Spooler configures the admin projection handlers registered in Start.
	Spooler admin_handler.Config
}
|
|
|
|
func Start(ctx context.Context, conf Config, static static.Storage, dbClient *database.DB) error {
|
|
view, err := admin_view.StartView(dbClient)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
admin_handler.Register(ctx, conf.Spooler, view, static)
|
|
admin_handler.Start(ctx)
|
|
|
|
return nil
|
|
}
|