mirror of https://github.com/agola-io/agola — 905 lines, 24 KiB, Go
// Code generated by go generate; DO NOT EDIT.
|
|
package db
|
|
|
|
import (
|
|
stdsql "database/sql"
|
|
"encoding/json"
|
|
|
|
"github.com/sorintlab/errors"
|
|
sq "github.com/huandu/go-sqlbuilder"
|
|
|
|
"agola.io/agola/internal/sqlg"
|
|
"agola.io/agola/internal/sqlg/sql"
|
|
|
|
types "agola.io/agola/services/runservice/types"
|
|
|
|
"time"
|
|
)
|
|
|
|
func (d *DB) fetchChangeGroups(tx *sql.Tx, q sq.Builder) ([]*types.ChangeGroup, []string, error) {
|
|
rows, err := d.query(tx, q)
|
|
if err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
return d.scanChangeGroups(rows, tx.ID(), 0)
|
|
}
|
|
|
|
func (d *DB) fetchChangeGroupsSkipLastFields(tx *sql.Tx, q sq.Builder, skipFieldsCount uint) ([]*types.ChangeGroup, []string, error) {
|
|
rows, err := d.query(tx, q)
|
|
if err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
return d.scanChangeGroups(rows, tx.ID(), skipFieldsCount)
|
|
}
|
|
|
|
func (d *DB) scanChangeGroup(rows *stdsql.Rows, skipFieldsCount uint) (*types.ChangeGroup, string, error) {
|
|
|
|
v := &types.ChangeGroup{}
|
|
|
|
var vi any = v
|
|
if x, ok := vi.(sqlg.Initer); ok {
|
|
x.Init()
|
|
}
|
|
|
|
fields := []any{&v.ID, &v.Revision, &v.CreationTime, &v.UpdateTime, &v.Name, &v.Value}
|
|
|
|
for i := uint(0); i < skipFieldsCount; i++ {
|
|
fields = append(fields, new(any))
|
|
}
|
|
|
|
if err := rows.Scan(fields...); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to scan row")
|
|
}
|
|
|
|
if x, ok := vi.(sqlg.PreJSONSetupper); ok {
|
|
if err := x.PreJSON(); err != nil {
|
|
return nil, "", errors.Wrap(err, "prejson error")
|
|
}
|
|
}
|
|
|
|
return v, v.ID, nil
|
|
}
|
|
|
|
func (d *DB) scanChangeGroups(rows *stdsql.Rows, txID string, skipFieldsCount uint) ([]*types.ChangeGroup, []string, error) {
|
|
vs := []*types.ChangeGroup{}
|
|
ids := []string{}
|
|
for rows.Next() {
|
|
v, id, err := d.scanChangeGroup(rows, skipFieldsCount)
|
|
if err != nil {
|
|
rows.Close()
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
v.TxID = txID
|
|
vs = append(vs, v)
|
|
ids = append(ids, id)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
return vs, ids, nil
|
|
}
|
|
|
|
func (d *DB) ChangeGroupArray() []any {
|
|
a := []any{}
|
|
a = append(a, new(string))
|
|
a = append(a, new(uint64))
|
|
a = append(a, new(time.Time))
|
|
a = append(a, new(time.Time))
|
|
a = append(a, new(string))
|
|
a = append(a, new(string))
|
|
|
|
return a
|
|
}
|
|
|
|
func (d *DB) ChangeGroupFromArray(a []any, txID string) (*types.ChangeGroup, string, error) {
|
|
v := &types.ChangeGroup{}
|
|
|
|
var vi any = v
|
|
if x, ok := vi.(sqlg.Initer); ok {
|
|
x.Init()
|
|
}
|
|
v.ID = *a[0].(*string)
|
|
v.Revision = *a[1].(*uint64)
|
|
v.CreationTime = *a[2].(*time.Time)
|
|
v.UpdateTime = *a[3].(*time.Time)
|
|
v.Name = *a[4].(*string)
|
|
v.Value = *a[5].(*string)
|
|
|
|
if x, ok := vi.(sqlg.PreJSONSetupper); ok {
|
|
if err := x.PreJSON(); err != nil {
|
|
return nil, "", errors.Wrap(err, "prejson error")
|
|
}
|
|
}
|
|
|
|
v.TxID = txID
|
|
|
|
return v, v.ID, nil
|
|
}
|
|
|
|
func (d *DB) fetchRunConfigs(tx *sql.Tx, q sq.Builder) ([]*types.RunConfig, []string, error) {
|
|
rows, err := d.query(tx, q)
|
|
if err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
return d.scanRunConfigs(rows, tx.ID(), 0)
|
|
}
|
|
|
|
func (d *DB) fetchRunConfigsSkipLastFields(tx *sql.Tx, q sq.Builder, skipFieldsCount uint) ([]*types.RunConfig, []string, error) {
|
|
rows, err := d.query(tx, q)
|
|
if err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
return d.scanRunConfigs(rows, tx.ID(), skipFieldsCount)
|
|
}
|
|
|
|
func (d *DB) scanRunConfig(rows *stdsql.Rows, skipFieldsCount uint) (*types.RunConfig, string, error) {
|
|
var inSetupErrorsJSON []byte
|
|
var inAnnotationsJSON []byte
|
|
var inStaticEnvironmentJSON []byte
|
|
var inEnvironmentJSON []byte
|
|
var inTasksJSON []byte
|
|
|
|
v := &types.RunConfig{}
|
|
|
|
var vi any = v
|
|
if x, ok := vi.(sqlg.Initer); ok {
|
|
x.Init()
|
|
}
|
|
|
|
fields := []any{&v.ID, &v.Revision, &v.CreationTime, &v.UpdateTime, &v.Name, &v.Group, &inSetupErrorsJSON, &inAnnotationsJSON, &inStaticEnvironmentJSON, &inEnvironmentJSON, &inTasksJSON, &v.CacheGroup}
|
|
|
|
for i := uint(0); i < skipFieldsCount; i++ {
|
|
fields = append(fields, new(any))
|
|
}
|
|
|
|
if err := rows.Scan(fields...); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to scan row")
|
|
}
|
|
|
|
if x, ok := vi.(sqlg.PreJSONSetupper); ok {
|
|
if err := x.PreJSON(); err != nil {
|
|
return nil, "", errors.Wrap(err, "prejson error")
|
|
}
|
|
}
|
|
if err := json.Unmarshal(inSetupErrorsJSON, &v.SetupErrors); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.SetupErrors")
|
|
}
|
|
if err := json.Unmarshal(inAnnotationsJSON, &v.Annotations); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.Annotations")
|
|
}
|
|
if err := json.Unmarshal(inStaticEnvironmentJSON, &v.StaticEnvironment); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.StaticEnvironment")
|
|
}
|
|
if err := json.Unmarshal(inEnvironmentJSON, &v.Environment); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.Environment")
|
|
}
|
|
if err := json.Unmarshal(inTasksJSON, &v.Tasks); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.Tasks")
|
|
}
|
|
|
|
return v, v.ID, nil
|
|
}
|
|
|
|
func (d *DB) scanRunConfigs(rows *stdsql.Rows, txID string, skipFieldsCount uint) ([]*types.RunConfig, []string, error) {
|
|
vs := []*types.RunConfig{}
|
|
ids := []string{}
|
|
for rows.Next() {
|
|
v, id, err := d.scanRunConfig(rows, skipFieldsCount)
|
|
if err != nil {
|
|
rows.Close()
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
v.TxID = txID
|
|
vs = append(vs, v)
|
|
ids = append(ids, id)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
return vs, ids, nil
|
|
}
|
|
|
|
func (d *DB) RunConfigArray() []any {
|
|
a := []any{}
|
|
a = append(a, new(string))
|
|
a = append(a, new(uint64))
|
|
a = append(a, new(time.Time))
|
|
a = append(a, new(time.Time))
|
|
a = append(a, new(string))
|
|
a = append(a, new(string))
|
|
a = append(a, new([]byte))
|
|
a = append(a, new([]byte))
|
|
a = append(a, new([]byte))
|
|
a = append(a, new([]byte))
|
|
a = append(a, new([]byte))
|
|
a = append(a, new(string))
|
|
|
|
return a
|
|
}
|
|
|
|
func (d *DB) RunConfigFromArray(a []any, txID string) (*types.RunConfig, string, error) {
|
|
v := &types.RunConfig{}
|
|
|
|
var vi any = v
|
|
if x, ok := vi.(sqlg.Initer); ok {
|
|
x.Init()
|
|
}
|
|
v.ID = *a[0].(*string)
|
|
v.Revision = *a[1].(*uint64)
|
|
v.CreationTime = *a[2].(*time.Time)
|
|
v.UpdateTime = *a[3].(*time.Time)
|
|
v.Name = *a[4].(*string)
|
|
v.Group = *a[5].(*string)
|
|
v.CacheGroup = *a[11].(*string)
|
|
|
|
if x, ok := vi.(sqlg.PreJSONSetupper); ok {
|
|
if err := x.PreJSON(); err != nil {
|
|
return nil, "", errors.Wrap(err, "prejson error")
|
|
}
|
|
}
|
|
if err := json.Unmarshal(a[6].([]byte), &v.SetupErrors); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.v.SetupErrors")
|
|
}
|
|
if err := json.Unmarshal(a[7].([]byte), &v.Annotations); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.v.Annotations")
|
|
}
|
|
if err := json.Unmarshal(a[8].([]byte), &v.StaticEnvironment); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.v.StaticEnvironment")
|
|
}
|
|
if err := json.Unmarshal(a[9].([]byte), &v.Environment); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.v.Environment")
|
|
}
|
|
if err := json.Unmarshal(a[10].([]byte), &v.Tasks); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.v.Tasks")
|
|
}
|
|
|
|
v.TxID = txID
|
|
|
|
return v, v.ID, nil
|
|
}
|
|
|
|
func (d *DB) fetchRuns(tx *sql.Tx, q sq.Builder) ([]*types.Run, []string, error) {
|
|
rows, err := d.query(tx, q)
|
|
if err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
return d.scanRuns(rows, tx.ID(), 0)
|
|
}
|
|
|
|
func (d *DB) fetchRunsSkipLastFields(tx *sql.Tx, q sq.Builder, skipFieldsCount uint) ([]*types.Run, []string, error) {
|
|
rows, err := d.query(tx, q)
|
|
if err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
return d.scanRuns(rows, tx.ID(), skipFieldsCount)
|
|
}
|
|
|
|
func (d *DB) scanRun(rows *stdsql.Rows, skipFieldsCount uint) (*types.Run, string, error) {
|
|
var inAnnotationsJSON []byte
|
|
var inTasksJSON []byte
|
|
|
|
v := &types.Run{}
|
|
|
|
var vi any = v
|
|
if x, ok := vi.(sqlg.Initer); ok {
|
|
x.Init()
|
|
}
|
|
|
|
fields := []any{&v.ID, &v.Revision, &v.CreationTime, &v.UpdateTime, &v.Sequence, &v.Name, &v.RunConfigID, &v.Counter, &v.Group, &inAnnotationsJSON, &v.Phase, &v.Result, &v.Stop, &inTasksJSON, &v.EnqueueTime, &v.StartTime, &v.EndTime, &v.Archived}
|
|
|
|
for i := uint(0); i < skipFieldsCount; i++ {
|
|
fields = append(fields, new(any))
|
|
}
|
|
|
|
if err := rows.Scan(fields...); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to scan row")
|
|
}
|
|
|
|
if x, ok := vi.(sqlg.PreJSONSetupper); ok {
|
|
if err := x.PreJSON(); err != nil {
|
|
return nil, "", errors.Wrap(err, "prejson error")
|
|
}
|
|
}
|
|
if err := json.Unmarshal(inAnnotationsJSON, &v.Annotations); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.Annotations")
|
|
}
|
|
if err := json.Unmarshal(inTasksJSON, &v.Tasks); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.Tasks")
|
|
}
|
|
|
|
return v, v.ID, nil
|
|
}
|
|
|
|
func (d *DB) scanRuns(rows *stdsql.Rows, txID string, skipFieldsCount uint) ([]*types.Run, []string, error) {
|
|
vs := []*types.Run{}
|
|
ids := []string{}
|
|
for rows.Next() {
|
|
v, id, err := d.scanRun(rows, skipFieldsCount)
|
|
if err != nil {
|
|
rows.Close()
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
v.TxID = txID
|
|
vs = append(vs, v)
|
|
ids = append(ids, id)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
return vs, ids, nil
|
|
}
|
|
|
|
func (d *DB) RunArray() []any {
|
|
a := []any{}
|
|
a = append(a, new(string))
|
|
a = append(a, new(uint64))
|
|
a = append(a, new(time.Time))
|
|
a = append(a, new(time.Time))
|
|
a = append(a, new(uint64))
|
|
a = append(a, new(string))
|
|
a = append(a, new(string))
|
|
a = append(a, new(uint64))
|
|
a = append(a, new(string))
|
|
a = append(a, new([]byte))
|
|
a = append(a, new(types.RunPhase))
|
|
a = append(a, new(types.RunResult))
|
|
a = append(a, new(bool))
|
|
a = append(a, new([]byte))
|
|
a = append(a, new(*time.Time))
|
|
a = append(a, new(*time.Time))
|
|
a = append(a, new(*time.Time))
|
|
a = append(a, new(bool))
|
|
|
|
return a
|
|
}
|
|
|
|
func (d *DB) RunFromArray(a []any, txID string) (*types.Run, string, error) {
|
|
v := &types.Run{}
|
|
|
|
var vi any = v
|
|
if x, ok := vi.(sqlg.Initer); ok {
|
|
x.Init()
|
|
}
|
|
v.ID = *a[0].(*string)
|
|
v.Revision = *a[1].(*uint64)
|
|
v.CreationTime = *a[2].(*time.Time)
|
|
v.UpdateTime = *a[3].(*time.Time)
|
|
v.Sequence = *a[4].(*uint64)
|
|
v.Name = *a[5].(*string)
|
|
v.RunConfigID = *a[6].(*string)
|
|
v.Counter = *a[7].(*uint64)
|
|
v.Group = *a[8].(*string)
|
|
v.Phase = *a[10].(*types.RunPhase)
|
|
v.Result = *a[11].(*types.RunResult)
|
|
v.Stop = *a[12].(*bool)
|
|
v.EnqueueTime = *a[14].(**time.Time)
|
|
v.StartTime = *a[15].(**time.Time)
|
|
v.EndTime = *a[16].(**time.Time)
|
|
v.Archived = *a[17].(*bool)
|
|
|
|
if x, ok := vi.(sqlg.PreJSONSetupper); ok {
|
|
if err := x.PreJSON(); err != nil {
|
|
return nil, "", errors.Wrap(err, "prejson error")
|
|
}
|
|
}
|
|
if err := json.Unmarshal(a[9].([]byte), &v.Annotations); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.v.Annotations")
|
|
}
|
|
if err := json.Unmarshal(a[13].([]byte), &v.Tasks); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.v.Tasks")
|
|
}
|
|
|
|
v.TxID = txID
|
|
|
|
return v, v.ID, nil
|
|
}
|
|
|
|
func (d *DB) fetchRunCounters(tx *sql.Tx, q sq.Builder) ([]*types.RunCounter, []string, error) {
|
|
rows, err := d.query(tx, q)
|
|
if err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
return d.scanRunCounters(rows, tx.ID(), 0)
|
|
}
|
|
|
|
func (d *DB) fetchRunCountersSkipLastFields(tx *sql.Tx, q sq.Builder, skipFieldsCount uint) ([]*types.RunCounter, []string, error) {
|
|
rows, err := d.query(tx, q)
|
|
if err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
return d.scanRunCounters(rows, tx.ID(), skipFieldsCount)
|
|
}
|
|
|
|
func (d *DB) scanRunCounter(rows *stdsql.Rows, skipFieldsCount uint) (*types.RunCounter, string, error) {
|
|
|
|
v := &types.RunCounter{}
|
|
|
|
var vi any = v
|
|
if x, ok := vi.(sqlg.Initer); ok {
|
|
x.Init()
|
|
}
|
|
|
|
fields := []any{&v.ID, &v.Revision, &v.CreationTime, &v.UpdateTime, &v.GroupID, &v.Value}
|
|
|
|
for i := uint(0); i < skipFieldsCount; i++ {
|
|
fields = append(fields, new(any))
|
|
}
|
|
|
|
if err := rows.Scan(fields...); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to scan row")
|
|
}
|
|
|
|
if x, ok := vi.(sqlg.PreJSONSetupper); ok {
|
|
if err := x.PreJSON(); err != nil {
|
|
return nil, "", errors.Wrap(err, "prejson error")
|
|
}
|
|
}
|
|
|
|
return v, v.ID, nil
|
|
}
|
|
|
|
func (d *DB) scanRunCounters(rows *stdsql.Rows, txID string, skipFieldsCount uint) ([]*types.RunCounter, []string, error) {
|
|
vs := []*types.RunCounter{}
|
|
ids := []string{}
|
|
for rows.Next() {
|
|
v, id, err := d.scanRunCounter(rows, skipFieldsCount)
|
|
if err != nil {
|
|
rows.Close()
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
v.TxID = txID
|
|
vs = append(vs, v)
|
|
ids = append(ids, id)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
return vs, ids, nil
|
|
}
|
|
|
|
func (d *DB) RunCounterArray() []any {
|
|
a := []any{}
|
|
a = append(a, new(string))
|
|
a = append(a, new(uint64))
|
|
a = append(a, new(time.Time))
|
|
a = append(a, new(time.Time))
|
|
a = append(a, new(string))
|
|
a = append(a, new(uint64))
|
|
|
|
return a
|
|
}
|
|
|
|
func (d *DB) RunCounterFromArray(a []any, txID string) (*types.RunCounter, string, error) {
|
|
v := &types.RunCounter{}
|
|
|
|
var vi any = v
|
|
if x, ok := vi.(sqlg.Initer); ok {
|
|
x.Init()
|
|
}
|
|
v.ID = *a[0].(*string)
|
|
v.Revision = *a[1].(*uint64)
|
|
v.CreationTime = *a[2].(*time.Time)
|
|
v.UpdateTime = *a[3].(*time.Time)
|
|
v.GroupID = *a[4].(*string)
|
|
v.Value = *a[5].(*uint64)
|
|
|
|
if x, ok := vi.(sqlg.PreJSONSetupper); ok {
|
|
if err := x.PreJSON(); err != nil {
|
|
return nil, "", errors.Wrap(err, "prejson error")
|
|
}
|
|
}
|
|
|
|
v.TxID = txID
|
|
|
|
return v, v.ID, nil
|
|
}
|
|
|
|
func (d *DB) fetchRunEvents(tx *sql.Tx, q sq.Builder) ([]*types.RunEvent, []string, error) {
|
|
rows, err := d.query(tx, q)
|
|
if err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
return d.scanRunEvents(rows, tx.ID(), 0)
|
|
}
|
|
|
|
func (d *DB) fetchRunEventsSkipLastFields(tx *sql.Tx, q sq.Builder, skipFieldsCount uint) ([]*types.RunEvent, []string, error) {
|
|
rows, err := d.query(tx, q)
|
|
if err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
return d.scanRunEvents(rows, tx.ID(), skipFieldsCount)
|
|
}
|
|
|
|
func (d *DB) scanRunEvent(rows *stdsql.Rows, skipFieldsCount uint) (*types.RunEvent, string, error) {
|
|
var inDataJSON []byte
|
|
|
|
v := &types.RunEvent{}
|
|
|
|
var vi any = v
|
|
if x, ok := vi.(sqlg.Initer); ok {
|
|
x.Init()
|
|
}
|
|
|
|
fields := []any{&v.ID, &v.Revision, &v.CreationTime, &v.UpdateTime, &v.Sequence, &v.RunEventType, &v.RunID, &v.Phase, &v.Result, &inDataJSON, &v.DataVersion}
|
|
|
|
for i := uint(0); i < skipFieldsCount; i++ {
|
|
fields = append(fields, new(any))
|
|
}
|
|
|
|
if err := rows.Scan(fields...); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to scan row")
|
|
}
|
|
|
|
if x, ok := vi.(sqlg.PreJSONSetupper); ok {
|
|
if err := x.PreJSON(); err != nil {
|
|
return nil, "", errors.Wrap(err, "prejson error")
|
|
}
|
|
}
|
|
if err := json.Unmarshal(inDataJSON, &v.Data); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.Data")
|
|
}
|
|
|
|
return v, v.ID, nil
|
|
}
|
|
|
|
func (d *DB) scanRunEvents(rows *stdsql.Rows, txID string, skipFieldsCount uint) ([]*types.RunEvent, []string, error) {
|
|
vs := []*types.RunEvent{}
|
|
ids := []string{}
|
|
for rows.Next() {
|
|
v, id, err := d.scanRunEvent(rows, skipFieldsCount)
|
|
if err != nil {
|
|
rows.Close()
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
v.TxID = txID
|
|
vs = append(vs, v)
|
|
ids = append(ids, id)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
return vs, ids, nil
|
|
}
|
|
|
|
func (d *DB) RunEventArray() []any {
|
|
a := []any{}
|
|
a = append(a, new(string))
|
|
a = append(a, new(uint64))
|
|
a = append(a, new(time.Time))
|
|
a = append(a, new(time.Time))
|
|
a = append(a, new(uint64))
|
|
a = append(a, new(types.RunEventType))
|
|
a = append(a, new(string))
|
|
a = append(a, new(types.RunPhase))
|
|
a = append(a, new(types.RunResult))
|
|
a = append(a, new([]byte))
|
|
a = append(a, new(uint64))
|
|
|
|
return a
|
|
}
|
|
|
|
func (d *DB) RunEventFromArray(a []any, txID string) (*types.RunEvent, string, error) {
|
|
v := &types.RunEvent{}
|
|
|
|
var vi any = v
|
|
if x, ok := vi.(sqlg.Initer); ok {
|
|
x.Init()
|
|
}
|
|
v.ID = *a[0].(*string)
|
|
v.Revision = *a[1].(*uint64)
|
|
v.CreationTime = *a[2].(*time.Time)
|
|
v.UpdateTime = *a[3].(*time.Time)
|
|
v.Sequence = *a[4].(*uint64)
|
|
v.RunEventType = *a[5].(*types.RunEventType)
|
|
v.RunID = *a[6].(*string)
|
|
v.Phase = *a[7].(*types.RunPhase)
|
|
v.Result = *a[8].(*types.RunResult)
|
|
v.DataVersion = *a[10].(*uint64)
|
|
|
|
if x, ok := vi.(sqlg.PreJSONSetupper); ok {
|
|
if err := x.PreJSON(); err != nil {
|
|
return nil, "", errors.Wrap(err, "prejson error")
|
|
}
|
|
}
|
|
if err := json.Unmarshal(a[9].([]byte), &v.Data); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.v.Data")
|
|
}
|
|
|
|
v.TxID = txID
|
|
|
|
return v, v.ID, nil
|
|
}
|
|
|
|
func (d *DB) fetchExecutors(tx *sql.Tx, q sq.Builder) ([]*types.Executor, []string, error) {
|
|
rows, err := d.query(tx, q)
|
|
if err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
return d.scanExecutors(rows, tx.ID(), 0)
|
|
}
|
|
|
|
func (d *DB) fetchExecutorsSkipLastFields(tx *sql.Tx, q sq.Builder, skipFieldsCount uint) ([]*types.Executor, []string, error) {
|
|
rows, err := d.query(tx, q)
|
|
if err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
return d.scanExecutors(rows, tx.ID(), skipFieldsCount)
|
|
}
|
|
|
|
func (d *DB) scanExecutor(rows *stdsql.Rows, skipFieldsCount uint) (*types.Executor, string, error) {
|
|
var inArchsJSON []byte
|
|
var inLabelsJSON []byte
|
|
var inSiblingsExecutorsJSON []byte
|
|
|
|
v := &types.Executor{}
|
|
|
|
var vi any = v
|
|
if x, ok := vi.(sqlg.Initer); ok {
|
|
x.Init()
|
|
}
|
|
|
|
fields := []any{&v.ID, &v.Revision, &v.CreationTime, &v.UpdateTime, &v.ExecutorID, &v.ListenURL, &inArchsJSON, &inLabelsJSON, &v.AllowPrivilegedContainers, &v.ActiveTasksLimit, &v.ActiveTasks, &v.Dynamic, &v.ExecutorGroup, &inSiblingsExecutorsJSON}
|
|
|
|
for i := uint(0); i < skipFieldsCount; i++ {
|
|
fields = append(fields, new(any))
|
|
}
|
|
|
|
if err := rows.Scan(fields...); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to scan row")
|
|
}
|
|
|
|
if x, ok := vi.(sqlg.PreJSONSetupper); ok {
|
|
if err := x.PreJSON(); err != nil {
|
|
return nil, "", errors.Wrap(err, "prejson error")
|
|
}
|
|
}
|
|
if err := json.Unmarshal(inArchsJSON, &v.Archs); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.Archs")
|
|
}
|
|
if err := json.Unmarshal(inLabelsJSON, &v.Labels); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.Labels")
|
|
}
|
|
if err := json.Unmarshal(inSiblingsExecutorsJSON, &v.SiblingsExecutors); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.SiblingsExecutors")
|
|
}
|
|
|
|
return v, v.ID, nil
|
|
}
|
|
|
|
func (d *DB) scanExecutors(rows *stdsql.Rows, txID string, skipFieldsCount uint) ([]*types.Executor, []string, error) {
|
|
vs := []*types.Executor{}
|
|
ids := []string{}
|
|
for rows.Next() {
|
|
v, id, err := d.scanExecutor(rows, skipFieldsCount)
|
|
if err != nil {
|
|
rows.Close()
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
v.TxID = txID
|
|
vs = append(vs, v)
|
|
ids = append(ids, id)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
return vs, ids, nil
|
|
}
|
|
|
|
func (d *DB) ExecutorArray() []any {
|
|
a := []any{}
|
|
a = append(a, new(string))
|
|
a = append(a, new(uint64))
|
|
a = append(a, new(time.Time))
|
|
a = append(a, new(time.Time))
|
|
a = append(a, new(string))
|
|
a = append(a, new(string))
|
|
a = append(a, new([]byte))
|
|
a = append(a, new([]byte))
|
|
a = append(a, new(bool))
|
|
a = append(a, new(int))
|
|
a = append(a, new(int))
|
|
a = append(a, new(bool))
|
|
a = append(a, new(string))
|
|
a = append(a, new([]byte))
|
|
|
|
return a
|
|
}
|
|
|
|
func (d *DB) ExecutorFromArray(a []any, txID string) (*types.Executor, string, error) {
|
|
v := &types.Executor{}
|
|
|
|
var vi any = v
|
|
if x, ok := vi.(sqlg.Initer); ok {
|
|
x.Init()
|
|
}
|
|
v.ID = *a[0].(*string)
|
|
v.Revision = *a[1].(*uint64)
|
|
v.CreationTime = *a[2].(*time.Time)
|
|
v.UpdateTime = *a[3].(*time.Time)
|
|
v.ExecutorID = *a[4].(*string)
|
|
v.ListenURL = *a[5].(*string)
|
|
v.AllowPrivilegedContainers = *a[8].(*bool)
|
|
v.ActiveTasksLimit = *a[9].(*int)
|
|
v.ActiveTasks = *a[10].(*int)
|
|
v.Dynamic = *a[11].(*bool)
|
|
v.ExecutorGroup = *a[12].(*string)
|
|
|
|
if x, ok := vi.(sqlg.PreJSONSetupper); ok {
|
|
if err := x.PreJSON(); err != nil {
|
|
return nil, "", errors.Wrap(err, "prejson error")
|
|
}
|
|
}
|
|
if err := json.Unmarshal(a[6].([]byte), &v.Archs); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.v.Archs")
|
|
}
|
|
if err := json.Unmarshal(a[7].([]byte), &v.Labels); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.v.Labels")
|
|
}
|
|
if err := json.Unmarshal(a[13].([]byte), &v.SiblingsExecutors); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.v.SiblingsExecutors")
|
|
}
|
|
|
|
v.TxID = txID
|
|
|
|
return v, v.ID, nil
|
|
}
|
|
|
|
func (d *DB) fetchExecutorTasks(tx *sql.Tx, q sq.Builder) ([]*types.ExecutorTask, []string, error) {
|
|
rows, err := d.query(tx, q)
|
|
if err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
return d.scanExecutorTasks(rows, tx.ID(), 0)
|
|
}
|
|
|
|
func (d *DB) fetchExecutorTasksSkipLastFields(tx *sql.Tx, q sq.Builder, skipFieldsCount uint) ([]*types.ExecutorTask, []string, error) {
|
|
rows, err := d.query(tx, q)
|
|
if err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
defer rows.Close()
|
|
|
|
return d.scanExecutorTasks(rows, tx.ID(), skipFieldsCount)
|
|
}
|
|
|
|
func (d *DB) scanExecutorTask(rows *stdsql.Rows, skipFieldsCount uint) (*types.ExecutorTask, string, error) {
|
|
var inSetupStepJSON []byte
|
|
var inStepsJSON []byte
|
|
|
|
v := &types.ExecutorTask{}
|
|
|
|
var vi any = v
|
|
if x, ok := vi.(sqlg.Initer); ok {
|
|
x.Init()
|
|
}
|
|
|
|
fields := []any{&v.ID, &v.Revision, &v.CreationTime, &v.UpdateTime, &v.ExecutorID, &v.RunID, &v.RunTaskID, &v.Stop, &v.Phase, &v.Timedout, &v.FailError, &v.StartTime, &v.EndTime, &inSetupStepJSON, &inStepsJSON}
|
|
|
|
for i := uint(0); i < skipFieldsCount; i++ {
|
|
fields = append(fields, new(any))
|
|
}
|
|
|
|
if err := rows.Scan(fields...); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to scan row")
|
|
}
|
|
|
|
if x, ok := vi.(sqlg.PreJSONSetupper); ok {
|
|
if err := x.PreJSON(); err != nil {
|
|
return nil, "", errors.Wrap(err, "prejson error")
|
|
}
|
|
}
|
|
if err := json.Unmarshal(inSetupStepJSON, &v.SetupStep); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.SetupStep")
|
|
}
|
|
if err := json.Unmarshal(inStepsJSON, &v.Steps); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.Steps")
|
|
}
|
|
|
|
return v, v.ID, nil
|
|
}
|
|
|
|
func (d *DB) scanExecutorTasks(rows *stdsql.Rows, txID string, skipFieldsCount uint) ([]*types.ExecutorTask, []string, error) {
|
|
vs := []*types.ExecutorTask{}
|
|
ids := []string{}
|
|
for rows.Next() {
|
|
v, id, err := d.scanExecutorTask(rows, skipFieldsCount)
|
|
if err != nil {
|
|
rows.Close()
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
v.TxID = txID
|
|
vs = append(vs, v)
|
|
ids = append(ids, id)
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, nil, errors.WithStack(err)
|
|
}
|
|
return vs, ids, nil
|
|
}
|
|
|
|
func (d *DB) ExecutorTaskArray() []any {
|
|
a := []any{}
|
|
a = append(a, new(string))
|
|
a = append(a, new(uint64))
|
|
a = append(a, new(time.Time))
|
|
a = append(a, new(time.Time))
|
|
a = append(a, new(string))
|
|
a = append(a, new(string))
|
|
a = append(a, new(string))
|
|
a = append(a, new(bool))
|
|
a = append(a, new(types.ExecutorTaskPhase))
|
|
a = append(a, new(bool))
|
|
a = append(a, new(string))
|
|
a = append(a, new(*time.Time))
|
|
a = append(a, new(*time.Time))
|
|
a = append(a, new([]byte))
|
|
a = append(a, new([]byte))
|
|
|
|
return a
|
|
}
|
|
|
|
func (d *DB) ExecutorTaskFromArray(a []any, txID string) (*types.ExecutorTask, string, error) {
|
|
v := &types.ExecutorTask{}
|
|
|
|
var vi any = v
|
|
if x, ok := vi.(sqlg.Initer); ok {
|
|
x.Init()
|
|
}
|
|
v.ID = *a[0].(*string)
|
|
v.Revision = *a[1].(*uint64)
|
|
v.CreationTime = *a[2].(*time.Time)
|
|
v.UpdateTime = *a[3].(*time.Time)
|
|
v.ExecutorID = *a[4].(*string)
|
|
v.RunID = *a[5].(*string)
|
|
v.RunTaskID = *a[6].(*string)
|
|
v.Stop = *a[7].(*bool)
|
|
v.Phase = *a[8].(*types.ExecutorTaskPhase)
|
|
v.Timedout = *a[9].(*bool)
|
|
v.FailError = *a[10].(*string)
|
|
v.StartTime = *a[11].(**time.Time)
|
|
v.EndTime = *a[12].(**time.Time)
|
|
|
|
if x, ok := vi.(sqlg.PreJSONSetupper); ok {
|
|
if err := x.PreJSON(); err != nil {
|
|
return nil, "", errors.Wrap(err, "prejson error")
|
|
}
|
|
}
|
|
if err := json.Unmarshal(a[13].([]byte), &v.SetupStep); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.v.SetupStep")
|
|
}
|
|
if err := json.Unmarshal(a[14].([]byte), &v.Steps); err != nil {
|
|
return nil, "", errors.Wrap(err, "failed to unmarshal v.v.Steps")
|
|
}
|
|
|
|
v.TxID = txID
|
|
|
|
return v, v.ID, nil
|
|
}
|