
Commit

minor post pull fixes
Vincent Planchenault committed Apr 26, 2022
1 parent c2ba76c commit 77b855d
Showing 13 changed files with 28 additions and 54 deletions.
8 changes: 1 addition & 7 deletions lib/server/listeners/cluster.go
@@ -20,7 +20,6 @@ import (
"context"
"fmt"

"github.com/CS-SI/SafeScale/v21/lib/server/resources/operations"
googleprotobuf "github.com/golang/protobuf/ptypes/empty"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -29,6 +28,7 @@ import (
"github.com/CS-SI/SafeScale/v21/lib/server/resources"
clusterfactory "github.com/CS-SI/SafeScale/v21/lib/server/resources/factories/cluster"
hostfactory "github.com/CS-SI/SafeScale/v21/lib/server/resources/factories/host"
"github.com/CS-SI/SafeScale/v21/lib/server/resources/operations"
"github.com/CS-SI/SafeScale/v21/lib/server/resources/operations/converters"
propertiesv3 "github.com/CS-SI/SafeScale/v21/lib/server/resources/properties/v3"
srvutils "github.com/CS-SI/SafeScale/v21/lib/server/utils"
@@ -196,12 +196,6 @@ func (s *ClusterListener) Inspect(ctx context.Context, in *protocol.Reference) (
if xerr != nil {
return nil, xerr
}
defer func() {
derr := instance.Released()
if derr != nil {
logrus.Warn(derr)
}
}()

return instance.ToProtocol()
}
17 changes: 3 additions & 14 deletions lib/server/listeners/volume.go
@@ -20,13 +20,9 @@ import (
"context"
"fmt"

"github.com/CS-SI/SafeScale/v21/lib/server/resources/abstract"
volumefactory "github.com/CS-SI/SafeScale/v21/lib/server/resources/factories/volume"
"github.com/CS-SI/SafeScale/v21/lib/utils/debug/tracing"
"github.com/sirupsen/logrus"

"github.com/CS-SI/SafeScale/v21/lib/protocol"
"github.com/CS-SI/SafeScale/v21/lib/server/handlers"
"github.com/CS-SI/SafeScale/v21/lib/server/resources/abstract"
"github.com/CS-SI/SafeScale/v21/lib/server/resources/enums/volumespeed"
volumefactory "github.com/CS-SI/SafeScale/v21/lib/server/resources/factories/volume"
srvutils "github.com/CS-SI/SafeScale/v21/lib/server/utils"
@@ -302,20 +298,13 @@ func (s *VolumeListener) Inspect(ctx context.Context, in *protocol.Reference) (_
defer tracer.Exiting()
defer fail.OnExitLogError(&err, tracer.TraceMessage())

volumeInstance, xerr := volumefactory.Load(job.Service(), ref)
volumeInstance, xerr := volumefactory.Load(job.Context(), job.Service(), ref)
if xerr != nil {
if _, ok := xerr.(*fail.ErrNotFound); ok {
return nil, abstract.ResourceNotFoundError("volume", ref)
}
return nil, xerr
}

defer func() {
issue := volumeInstance.Released()
if issue != nil {
logrus.Warn(issue)
}
}()

return volumeInstance.ToProtocol()
return volumeInstance.ToProtocol(job.Context())
}
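
Both listener changes follow the same pattern: the Released() defer that used to follow the factory Load call is removed, and the volume listener now threads job.Context() through Load and ToProtocol. A minimal sketch of the resulting Inspect tail, using only identifiers that appear in the diff above (job and ref come from the surrounding gRPC handler):

```go
// Post-commit tail of VolumeListener.Inspect (sketch, not new API).
volumeInstance, xerr := volumefactory.Load(job.Context(), job.Service(), ref)
if xerr != nil {
	if _, ok := xerr.(*fail.ErrNotFound); ok {
		return nil, abstract.ResourceNotFoundError("volume", ref)
	}
	return nil, xerr
}
// The Released() defer that used to sit here is gone.
return volumeInstance.ToProtocol(job.Context())
```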
2 changes: 1 addition & 1 deletion lib/server/resources/factories/cluster/cluster.go
@@ -57,5 +57,5 @@ func New(ctx context.Context, svc iaas.Service) (_ resources.Cluster, ferr fail.

// Load loads metadata of a cluster and returns an instance of resources.Cluster
func Load(ctx context.Context, svc iaas.Service, name string) (_ resources.Cluster, ferr fail.Error) {
return operations.LoadCluster(ctx, svc, name, operations.WithReloadOption)
return operations.LoadCluster(ctx, svc, name)
}
2 changes: 1 addition & 1 deletion lib/server/resources/factories/host/host.go
@@ -62,5 +62,5 @@ func New(svc iaas.Service) (_ resources.Host, err fail.Error) {

// Load loads the metadata of host and returns an instance of resources.Host
func Load(ctx context.Context, svc iaas.Service, ref string) (_ resources.Host, err fail.Error) {
return operations.LoadHost(ctx, svc, ref, operations.WithReloadOption)
return operations.LoadHost(ctx, svc, ref)
}
2 changes: 1 addition & 1 deletion lib/server/resources/factories/share/share.go
@@ -35,5 +35,5 @@ func New(svc iaas.Service) (resources.Share, fail.Error) {

// Load loads the metadata of a share and returns an instance of resources.Share
func Load(ctx context.Context, svc iaas.Service, ref string) (resources.Share, fail.Error) {
return operations.LoadShare(ctx, svc, ref, operations.WithReloadOption)
return operations.LoadShare(ctx, svc, ref)
}
2 changes: 1 addition & 1 deletion lib/server/resources/factories/volume/volume.go
@@ -32,5 +32,5 @@ func New(svc iaas.Service) (resources.Volume, fail.Error) {

// Load loads the metadata of a volume and returns an instance of resources.Volume
func Load(ctx context.Context, svc iaas.Service, ref string) (resources.Volume, fail.Error) {
return operations.LoadVolume(ctx, svc, ref, operations.WithReloadOption)
return operations.LoadVolume(ctx, svc, ref)
}
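
All four resource factories (cluster, host, share, volume) now call the operations loader without forwarding operations.WithReloadOption, so a caller simply passes a context, the service and a reference. A caller-side sketch based on the signatures in the diff; ctx and svc are assumed to be in scope and "my-host" is a placeholder reference:

```go
// Sketch: loading a host through the factory after this commit.
hostInstance, xerr := hostfactory.Load(ctx, svc, "my-host")
if xerr != nil {
	return xerr // propagate as before; there is no reload option to pass anymore
}
_ = hostInstance // use the instance as before
```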
18 changes: 9 additions & 9 deletions lib/server/resources/operations/cluster.go
@@ -149,7 +149,7 @@ func (instance *Cluster) startRandomDelayGenerator(ctx context.Context, min, max
}

// LoadCluster loads cluster information from metadata
func LoadCluster(ctx context.Context, svc iaas.Service, name string, options ...data.ImmutableKeyValue) (_ resources.Cluster, ferr fail.Error) {
func LoadCluster(ctx context.Context, svc iaas.Service, name string) (_ resources.Cluster, ferr fail.Error) {
defer fail.OnPanic(&ferr)

if svc == nil {
@@ -1194,7 +1194,7 @@ func (instance *Cluster) AddNodes(ctx context.Context, count uint, def abstract.
for _, v := range nodes {
v := v
_, derr = dtg.Start(
instance.taskDeleteNode, taskDeleteNodeParameters{node: v, nodeLoadMethod: WithoutReloadOption},
instance.taskDeleteNode, taskDeleteNodeParameters{node: v},
)
if derr != nil {
abErr := dtg.AbortWithCause(derr)
@@ -1386,7 +1386,7 @@ func (instance *Cluster) DeleteSpecificNode(ctx context.Context, hostID string,
return xerr
}

xerr = instance.deleteNode(task.Context(), node, selectedMaster.(*Host), WithReloadOption)
xerr = instance.deleteNode(task.Context(), node, selectedMaster.(*Host))
if xerr != nil {
return xerr
}
@@ -2037,7 +2037,7 @@ func (instance *Cluster) deleteMaster(ctx context.Context, host resources.Host)
}

// deleteNode deletes a node
func (instance *Cluster) deleteNode(ctx context.Context, node *propertiesv3.ClusterNode, master *Host, loadHostMethod data.ImmutableKeyValue) (ferr fail.Error) {
func (instance *Cluster) deleteNode(ctx context.Context, node *propertiesv3.ClusterNode, master *Host) (ferr fail.Error) {
task, xerr := concurrency.TaskFromContextOrVoid(ctx)
xerr = debug.InjectPlannedFail(xerr)
if xerr != nil {
@@ -2114,7 +2114,7 @@ func (instance *Cluster) deleteNode(ctx context.Context, node *propertiesv3.Clus
}()

// Deletes node
hostInstance, xerr := LoadHost(task.Context(), instance.Service(), nodeRef, loadHostMethod)
hostInstance, xerr := LoadHost(task.Context(), instance.Service(), nodeRef)
xerr = debug.InjectPlannedFail(xerr)
if xerr != nil {
switch xerr.(type) {
@@ -2312,7 +2312,7 @@ func (instance *Cluster) delete(ctx context.Context) (ferr fail.Error) {

completedOptions = append(completedOptions, concurrency.AmendID(fmt.Sprintf("/node/%s/delete", n.Name)))
_, xerr = tg.Start(
instance.taskDeleteNode, taskDeleteNodeParameters{node: n, nodeLoadMethod: WithoutReloadOption},
instance.taskDeleteNode, taskDeleteNodeParameters{node: n},
completedOptions...,
)
xerr = debug.InjectPlannedFail(xerr)
@@ -2338,7 +2338,7 @@ func (instance *Cluster) delete(ctx context.Context) (ferr fail.Error) {

completedOptions = append(completedOptions, concurrency.AmendID(fmt.Sprintf("/master/%s/delete", n.Name)))
_, xerr := tg.Start(
instance.taskDeleteMaster, taskDeleteNodeParameters{node: n, nodeLoadMethod: WithoutReloadOption},
instance.taskDeleteMaster, taskDeleteNodeParameters{node: n},
completedOptions...,
)
xerr = debug.InjectPlannedFail(xerr)
@@ -2396,7 +2396,7 @@ func (instance *Cluster) delete(ctx context.Context) (ferr fail.Error) {

for _, v := range all {
_, xerr = tg.Start(
instance.taskDeleteNode, taskDeleteNodeParameters{node: v, nodeLoadMethod: WithoutReloadOption},
instance.taskDeleteNode, taskDeleteNodeParameters{node: v},
concurrency.InheritParentIDOption, concurrency.AmendID(fmt.Sprintf("/node/%s/delete", v.Name)),
)
xerr = debug.InjectPlannedFail(xerr)
@@ -3407,7 +3407,7 @@ func (instance *Cluster) Shrink(ctx context.Context, count uint) (_ []*propertie
for _, v := range removedNodes {
_, xerr = tg.Start(
instance.taskDeleteNode,
taskDeleteNodeParameters{node: v, nodeLoadMethod: WithReloadOption, master: selectedMaster.(*Host)},
taskDeleteNodeParameters{node: v, master: selectedMaster.(*Host)},
concurrency.InheritParentIDOption, concurrency.AmendID(fmt.Sprintf("/node/%s/delete", v.Name)),
)
xerr = debug.InjectPlannedFail(xerr)
12 changes: 4 additions & 8 deletions lib/server/resources/operations/clustertasks.go
@@ -2845,9 +2845,8 @@ func (instance *Cluster) taskDeleteNodeOnFailure(task concurrency.Task, params c
}

type taskDeleteNodeParameters struct {
node *propertiesv3.ClusterNode
nodeLoadMethod data.ImmutableKeyValue
master *Host
node *propertiesv3.ClusterNode
master *Host
}

// taskDeleteNode deletes one node
@@ -2876,9 +2875,6 @@ func (instance *Cluster) taskDeleteNode(task concurrency.Task, params concurrenc
if p.node.ID == "" && p.node.Name == "" {
return nil, fail.InvalidParameterError("params.node.ID|params.node.Name", "ID or Name must be set")
}
if p.nodeLoadMethod != WithoutReloadOption && p.nodeLoadMethod != WithReloadOption {
return nil, fail.InvalidParameterError("params.nodeLoadMethod", "must be 'WithoutReloadOption' or 'WithReloadOption'")
}
nodeName := p.node.Name
if nodeName == "" {
nodeName = p.node.ID
@@ -2900,7 +2896,7 @@ func (instance *Cluster) taskDeleteNode(task concurrency.Task, params concurrenc
}()

logrus.Debugf("Deleting Node '%s'", nodeName)
xerr = instance.deleteNode(task.Context(), p.node, p.master, p.nodeLoadMethod)
xerr = instance.deleteNode(task.Context(), p.node, p.master)
xerr = debug.InjectPlannedFail(xerr)
if xerr != nil {
switch xerr.(type) {
@@ -2952,7 +2948,7 @@ func (instance *Cluster) taskDeleteMaster(task concurrency.Task, params concurre
return nil, fail.AbortedError(lerr, "parent task killed")
}

host, xerr := LoadHost(task.Context(), instance.Service(), nodeName, WithoutReloadOption)
host, xerr := LoadHost(task.Context(), instance.Service(), nodeName)
xerr = debug.InjectPlannedFail(xerr)
if xerr != nil {
switch xerr.(type) {
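
In clustertasks.go, taskDeleteNodeParameters now carries only the node and an optional master; the nodeLoadMethod field and its validation disappear, and taskDeleteMaster loads the host without the old reload flag. The resulting parameter struct and a typical invocation, as used in Cluster.Shrink above (a sketch of what the diff shows, not new API):

```go
// Post-commit parameter struct (fields as shown in the diff).
type taskDeleteNodeParameters struct {
	node   *propertiesv3.ClusterNode
	master *Host
}

// Typical use, as in Cluster.Shrink:
//   _, xerr = tg.Start(instance.taskDeleteNode,
//       taskDeleteNodeParameters{node: v, master: selectedMaster.(*Host)})
```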
4 changes: 2 additions & 2 deletions lib/server/resources/operations/host.go
@@ -105,7 +105,7 @@ func NewHost(svc iaas.Service) (_ *Host, ferr fail.Error) {
}

// LoadHost ...
func LoadHost(ctx context.Context, svc iaas.Service, ref string, options ...data.ImmutableKeyValue) (_ resources.Host, ferr fail.Error) {
func LoadHost(ctx context.Context, svc iaas.Service, ref string) (_ resources.Host, ferr fail.Error) {
defer fail.OnPanic(&ferr)

if svc == nil {
@@ -2314,7 +2314,7 @@ func (instance *Host) RelaxedDeleteHost(ctx context.Context) (ferr fail.Error) {
if count > 0 {
// clients found, checks if these clients already exists...
for _, hostID := range hostShare.ClientsByID {
instance, inErr := LoadHost(task.Context(), svc, hostID, WithoutReloadOption)
instance, inErr := LoadHost(task.Context(), svc, hostID)
if inErr != nil {
debug.IgnoreError(inErr)
continue
6 changes: 3 additions & 3 deletions lib/server/resources/operations/share.go
@@ -140,7 +140,7 @@ func NewShare(svc iaas.Service) (resources.Share, fail.Error) {
// If error is fail.ErrNotFound return this error
// In case of any other error, abort the retry to propagate the error
// If retry times out, return fail.ErrTimeout
func LoadShare(svc iaas.Service, ref string) (shareInstance resources.Share, ferr fail.Error) {
func LoadShare(ctx context.Context, svc iaas.Service, ref string) (_ resources.Share, ferr fail.Error) {
defer fail.OnPanic(&ferr)

if svc == nil {
@@ -150,7 +150,7 @@ func LoadShare(svc iaas.Service, ref string) (shareInstance resources.Share, fer
return nil, fail.InvalidParameterError("ref", "cannot be empty string")
}

cacheMissLoader := func() (data.Identifiable, fail.Error) { return onShareCacheMiss(svc, ref) }
cacheMissLoader := func() (data.Identifiable, fail.Error) { return onShareCacheMiss(ctx, svc, ref) }
anon, xerr := cacheMissLoader()
if xerr != nil {
return nil, xerr
@@ -169,7 +169,7 @@ func LoadShare(svc iaas.Service, ref string) (shareInstance resources.Share, fer
}

// onShareCacheMiss is called when there is no instance in cache of Share 'ref'
func onShareCacheMiss(svc iaas.Service, ref string) (data.Identifiable, fail.Error) {
func onShareCacheMiss(ctx context.Context, svc iaas.Service, ref string) (data.Identifiable, fail.Error) {
shareInstance, innerXErr := NewShare(svc)
if innerXErr != nil {
return nil, innerXErr
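
LoadShare and onShareCacheMiss now take a context.Context as their first parameter, matching the other loaders touched by this commit. A caller-side sketch based on the signature in the diff; ctx and svc are assumed to be in scope and "my-share" is a placeholder reference:

```go
// Sketch: loading a share after this commit.
shareInstance, xerr := operations.LoadShare(ctx, svc, "my-share")
if xerr != nil {
	return nil, xerr
}
_ = shareInstance // use the instance as before
```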
2 changes: 1 addition & 1 deletion lib/server/resources/operations/subnet.go
@@ -1173,7 +1173,7 @@ func (instance *Subnet) Delete(ctx context.Context) (ferr fail.Error) {
if hostsLen > 0 {
for k := range shV1.ByName {
// Check if Host still has metadata and count it if yes
if hess, innerXErr := LoadHost(lastCtx, svc, k, WithReloadOption); innerXErr != nil {
if hess, innerXErr := LoadHost(lastCtx, svc, k); innerXErr != nil {
debug.IgnoreError(innerXErr)
} else {
if _, innerXErr := hess.ForceGetState(lastCtx); innerXErr != nil {
4 changes: 0 additions & 4 deletions lib/server/resources/operations/subnetunsafe.go
@@ -571,10 +571,6 @@ func (instance *Subnet) unsafeCreateSubnet(ctx context.Context, req abstract.Sub
req.DefaultSSHPort = 22
}

if req.DefaultSSHPort == 0 {
req.DefaultSSHPort = 22
}

subnetGWSG, subnetInternalSG, subnetPublicIPSG, xerr := instance.unsafeCreateSecurityGroups(ctx, networkInstance, req.KeepOnFailure, int32(req.DefaultSSHPort))
xerr = debug.InjectPlannedFail(xerr)
if xerr != nil {
3 changes: 1 addition & 2 deletions lib/server/resources/operations/volume.go
@@ -77,7 +77,7 @@ func NewVolume(svc iaas.Service) (_ resources.Volume, ferr fail.Error) {
}

// LoadVolume loads the metadata of a subnet
func LoadVolume(svc iaas.Service, ref string) (volumeInstance resources.Volume, ferr fail.Error) {
func LoadVolume(ctx context.Context, svc iaas.Service, ref string) (_ resources.Volume, ferr fail.Error) {
defer fail.OnPanic(&ferr)

if svc == nil {
@@ -93,7 +93,6 @@ func LoadVolume(svc iaas.Service, ref string) (volumeInstance resources.Volume,
return nil, xerr
}

var ok bool
volumeInstance, ok := anon.(resources.Volume)
if !ok {
return nil, fail.InconsistentError("value in cache for Volume with key '%s' is not a resources.Volume", ref)
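
LoadVolume follows the same pattern: it gains a leading context.Context, and the redundant "var ok bool" before the cache type assertion is dropped in favour of ":=". A caller-side sketch mirroring the listener change earlier in the diff; ctx, svc and ref are assumed to be in scope:

```go
// Sketch: loading a volume and converting it to protocol after this commit.
volumeInstance, xerr := operations.LoadVolume(ctx, svc, ref)
if xerr != nil {
	return nil, xerr
}
return volumeInstance.ToProtocol(ctx)
```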
