package terraform
import (
+ "bytes"
"context"
"fmt"
"log"
- "sort"
"strings"
"sync"
- "github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
+ "github.com/zclconf/go-cty/cty"
+
+ "github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/config"
- "github.com/hashicorp/terraform/config/module"
- "github.com/hashicorp/terraform/helper/experiment"
+ "github.com/hashicorp/terraform/configs"
+ "github.com/hashicorp/terraform/lang"
+ "github.com/hashicorp/terraform/plans"
+ "github.com/hashicorp/terraform/providers"
+ "github.com/hashicorp/terraform/provisioners"
+ "github.com/hashicorp/terraform/states"
+ "github.com/hashicorp/terraform/states/statefile"
+ "github.com/hashicorp/terraform/tfdiags"
)
// InputMode defines what sort of input will be asked for when Input is called on Context.
// ContextOpts are the user-configurable options to create a context with
// NewContext.
type ContextOpts struct {
- Meta *ContextMeta
- Destroy bool
- Diff *Diff
- Hooks []Hook
- Module *module.Tree
- Parallelism int
- State *State
- StateFutureAllowed bool
- Providers map[string]ResourceProviderFactory
- Provisioners map[string]ResourceProvisionerFactory
- Shadow bool
- Targets []string
- Variables map[string]interface{}
+ Config *configs.Config
+ Changes *plans.Changes
+ State *states.State
+ Targets []addrs.Targetable
+ Variables InputValues
+ Meta *ContextMeta
+ Destroy bool
+
+ Hooks []Hook
+ Parallelism int
+ ProviderResolver providers.Resolver
+ Provisioners map[string]ProvisionerFactory
+
+ // If non-nil, will apply as additional constraints on the provider
+ // plugins that will be requested from the provider resolver.
+ ProviderSHA256s map[string][]byte
+ SkipProviderVerify bool
UIInput UIInput
}
// Context represents all the context that Terraform needs in order to
// perform operations on infrastructure. This structure is built using
-// NewContext. See the documentation for that.
-//
-// Extra functions on Context can be found in context_*.go files.
+// NewContext.
type Context struct {
- // Maintainer note: Anytime this struct is changed, please verify
- // that newShadowContext still does the right thing. Tests should
- // fail regardless but putting this note here as well.
+ config *configs.Config
+ changes *plans.Changes
+ state *states.State
+ targets []addrs.Targetable
+ variables InputValues
+ meta *ContextMeta
+ destroy bool
- components contextComponentFactory
- destroy bool
- diff *Diff
- diffLock sync.RWMutex
hooks []Hook
- meta *ContextMeta
- module *module.Tree
+ components contextComponentFactory
+ schemas *Schemas
sh *stopHook
- shadow bool
- state *State
- stateLock sync.RWMutex
- targets []string
uiInput UIInput
- variables map[string]interface{}
l sync.Mutex // Lock acquired during any task
parallelSem Semaphore
- providerInputConfig map[string]map[string]interface{}
+ providerInputConfig map[string]map[string]cty.Value
+ providerSHA256s map[string][]byte
runLock sync.Mutex
runCond *sync.Cond
runContext context.Context
shadowErr error
}
+// (additional methods on Context can be found in context_*.go files.)
+
// NewContext creates a new Context structure.
//
-// Once a Context is creator, the pointer values within ContextOpts
-// should not be mutated in any way, since the pointers are copied, not
-// the values themselves.
-func NewContext(opts *ContextOpts) (*Context, error) {
- // Validate the version requirement if it is given
- if opts.Module != nil {
- if err := checkRequiredVersion(opts.Module); err != nil {
- return nil, err
- }
+// Once a Context is created, the caller must not access or mutate any of
+// the objects referenced (directly or indirectly) by the ContextOpts fields.
+//
+// If the returned diagnostics contains errors then the resulting context is
+// invalid and must not be used.
+func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) {
+ log.Printf("[TRACE] terraform.NewContext: starting")
+ diags := CheckCoreVersionRequirements(opts.Config)
+ // If version constraints are not met then we'll bail early since otherwise
+ // we're likely to just see a bunch of other errors related to
+ // incompatibilities, which could be overwhelming for the user.
+ if diags.HasErrors() {
+ return nil, diags
}
// Copy all the hooks and add our stop hook. We don't append directly
state := opts.State
if state == nil {
- state = new(State)
- state.init()
- }
-
- // If our state is from the future, then error. Callers can avoid
- // this error by explicitly setting `StateFutureAllowed`.
- if !opts.StateFutureAllowed && state.FromFutureTerraform() {
- return nil, fmt.Errorf(
- "Terraform doesn't allow running any operations against a state\n"+
- "that was written by a future Terraform version. The state is\n"+
- "reporting it is written by Terraform '%s'.\n\n"+
- "Please run at least that version of Terraform to continue.",
- state.TFVersion)
+ state = states.NewState()
}
- // Explicitly reset our state version to our current version so that
- // any operations we do will write out that our latest version
- // has run.
- state.TFVersion = Version
-
// Determine parallelism, default to 10. We do this both to limit
// CPU pressure but also to have an extra guard against rate throttling
// from providers.
// 2 - Take values specified in -var flags, overriding values
// set by environment variables if necessary. This includes
// values taken from -var-file in addition.
- variables := make(map[string]interface{})
+ var variables InputValues
+ if opts.Config != nil {
+ // Default variables from the configuration seed our map.
+ variables = DefaultVariableValues(opts.Config.Module.Variables)
+ }
+ // Variables provided by the caller (from CLI, environment, etc) can
+ // override the defaults.
+ variables = variables.Override(opts.Variables)
+
+ // Bind available provider plugins to the constraints in config
+ var providerFactories map[string]providers.Factory
+ if opts.ProviderResolver != nil {
+ deps := ConfigTreeDependencies(opts.Config, state)
+ reqd := deps.AllPluginRequirements()
+ if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify {
+ reqd.LockExecutables(opts.ProviderSHA256s)
+ }
+ log.Printf("[TRACE] terraform.NewContext: resolving provider version selections")
- if opts.Module != nil {
- var err error
- variables, err = Variables(opts.Module, opts.Variables)
- if err != nil {
- return nil, err
+ var providerDiags tfdiags.Diagnostics
+ providerFactories, providerDiags = resourceProviderFactories(opts.ProviderResolver, reqd)
+ diags = diags.Append(providerDiags)
+
+ if diags.HasErrors() {
+ return nil, diags
}
+ } else {
+ providerFactories = make(map[string]providers.Factory)
+ }
+
+ components := &basicComponentFactory{
+ providers: providerFactories,
+ provisioners: opts.Provisioners,
+ }
+
+ log.Printf("[TRACE] terraform.NewContext: loading provider schemas")
+ schemas, err := LoadSchemas(opts.Config, opts.State, components)
+ if err != nil {
+ diags = diags.Append(err)
+ return nil, diags
}
- diff := opts.Diff
- if diff == nil {
- diff = &Diff{}
+ changes := opts.Changes
+ if changes == nil {
+ changes = plans.NewChanges()
}
+ config := opts.Config
+ if config == nil {
+ config = configs.NewEmptyConfig()
+ }
+
+ log.Printf("[TRACE] terraform.NewContext: complete")
+
return &Context{
- components: &basicComponentFactory{
- providers: opts.Providers,
- provisioners: opts.Provisioners,
- },
- destroy: opts.Destroy,
- diff: diff,
- hooks: hooks,
- meta: opts.Meta,
- module: opts.Module,
- shadow: opts.Shadow,
- state: state,
- targets: opts.Targets,
- uiInput: opts.UIInput,
- variables: variables,
+ components: components,
+ schemas: schemas,
+ destroy: opts.Destroy,
+ changes: changes,
+ hooks: hooks,
+ meta: opts.Meta,
+ config: config,
+ state: state,
+ targets: opts.Targets,
+ uiInput: opts.UIInput,
+ variables: variables,
parallelSem: NewSemaphore(par),
- providerInputConfig: make(map[string]map[string]interface{}),
+ providerInputConfig: make(map[string]map[string]cty.Value),
+ providerSHA256s: opts.ProviderSHA256s,
sh: sh,
}, nil
}
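+
+// newContextExample is a minimal illustrative sketch (the function name and
+// the placeholder values are assumptions added for illustration, not part of
+// the original change) showing how a caller might construct a Context via the
+// new ContextOpts fields. Real callers pass a loaded *configs.Config, a
+// provider resolver, and any prior state rather than these empty values.
+func newContextExample() (*Context, tfdiags.Diagnostics) {
+ opts := &ContextOpts{
+ Config: configs.NewEmptyConfig(),
+ State: states.NewState(),
+ Variables: InputValues{},
+ }
+ return NewContext(opts)
+}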
+func (c *Context) Schemas() *Schemas {
+ return c.schemas
+}
+
type ContextGraphOpts struct {
// If true, validates the graph structure (checks for cycles).
Validate bool
// Graph returns the graph used for the given operation type.
//
// The most extensive or complex graph type is GraphTypePlan.
-func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
+func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, tfdiags.Diagnostics) {
if opts == nil {
opts = &ContextGraphOpts{Validate: true}
}
switch typ {
case GraphTypeApply:
return (&ApplyGraphBuilder{
- Module: c.module,
- Diff: c.diff,
- State: c.state,
- Providers: c.components.ResourceProviders(),
- Provisioners: c.components.ResourceProvisioners(),
- Targets: c.targets,
- Destroy: c.destroy,
- Validate: opts.Validate,
- }).Build(RootModulePath)
-
- case GraphTypeInput:
- // The input graph is just a slightly modified plan graph
- fallthrough
+ Config: c.config,
+ Changes: c.changes,
+ State: c.state,
+ Components: c.components,
+ Schemas: c.schemas,
+ Targets: c.targets,
+ Destroy: c.destroy,
+ Validate: opts.Validate,
+ }).Build(addrs.RootModuleInstance)
+
case GraphTypeValidate:
// The validate graph is just a slightly modified plan graph
fallthrough
case GraphTypePlan:
// Create the plan graph builder
p := &PlanGraphBuilder{
- Module: c.module,
- State: c.state,
- Providers: c.components.ResourceProviders(),
- Targets: c.targets,
- Validate: opts.Validate,
+ Config: c.config,
+ State: c.state,
+ Components: c.components,
+ Schemas: c.schemas,
+ Targets: c.targets,
+ Validate: opts.Validate,
}
// Some special cases for other graph types shared with plan currently
var b GraphBuilder = p
switch typ {
- case GraphTypeInput:
- b = InputGraphBuilder(p)
case GraphTypeValidate:
- // We need to set the provisioners so those can be validated
- p.Provisioners = c.components.ResourceProvisioners()
-
b = ValidateGraphBuilder(p)
}
- return b.Build(RootModulePath)
+ return b.Build(addrs.RootModuleInstance)
case GraphTypePlanDestroy:
return (&DestroyPlanGraphBuilder{
- Module: c.module,
- State: c.state,
- Targets: c.targets,
- Validate: opts.Validate,
- }).Build(RootModulePath)
+ Config: c.config,
+ State: c.state,
+ Components: c.components,
+ Schemas: c.schemas,
+ Targets: c.targets,
+ Validate: opts.Validate,
+ }).Build(addrs.RootModuleInstance)
case GraphTypeRefresh:
return (&RefreshGraphBuilder{
- Module: c.module,
- State: c.state,
- Providers: c.components.ResourceProviders(),
- Targets: c.targets,
- Validate: opts.Validate,
- }).Build(RootModulePath)
- }
+ Config: c.config,
+ State: c.state,
+ Components: c.components,
+ Schemas: c.schemas,
+ Targets: c.targets,
+ Validate: opts.Validate,
+ }).Build(addrs.RootModuleInstance)
+
+ case GraphTypeEval:
+ return (&EvalGraphBuilder{
+ Config: c.config,
+ State: c.state,
+ Components: c.components,
+ Schemas: c.schemas,
+ }).Build(addrs.RootModuleInstance)
- return nil, fmt.Errorf("unknown graph type: %s", typ)
+ default:
+ // Should never happen, because the above is exhaustive for all graph types.
+ panic(fmt.Errorf("unsupported graph type %s", typ))
+ }
}
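+
+// graphExample is an illustrative sketch (hypothetical name, added for
+// illustration) showing a typical call to Graph: pick a graph type and
+// optionally control structural validation through ContextGraphOpts.
+func graphExample(c *Context) (*Graph, tfdiags.Diagnostics) {
+ // Passing nil opts is equivalent to requesting validation; it is spelled
+ // out here only for clarity.
+ return c.Graph(GraphTypePlan, &ContextGraphOpts{Validate: true})
+}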
// ShadowError returns any errors caught during a shadow operation.
// State returns a copy of the current state associated with this context.
//
// This cannot safely be called in parallel with any other Context function.
-func (c *Context) State() *State {
+func (c *Context) State() *states.State {
return c.state.DeepCopy()
}
-// Interpolater returns an Interpolater built on a copy of the state
-// that can be used to test interpolation values.
-func (c *Context) Interpolater() *Interpolater {
- var varLock sync.Mutex
- var stateLock sync.RWMutex
- return &Interpolater{
- Operation: walkApply,
- Meta: c.meta,
- Module: c.module,
- State: c.state.DeepCopy(),
- StateLock: &stateLock,
- VariableValues: c.variables,
- VariableValuesLock: &varLock,
- }
+// Eval produces a scope in which expressions can be evaluated for
+// the given module path.
+//
+// This method must first evaluate any ephemeral values (input variables, local
+// values, and output values) in the configuration. These ephemeral values are
+// not included in the persisted state, so they must be re-computed using other
+// values in the state before they can be properly evaluated. The updated
+// values are retained in the main state associated with the receiving context.
+//
+// This function takes no action against remote APIs but it does need access
+// to all provider and provisioner instances in order to obtain their schemas
+// for type checking.
+//
+// The result is an evaluation scope that can be used to resolve references
+// against the root module. If the returned diagnostics contains errors then
+// the returned scope may be nil. If it is not nil then it may still be used
+// to attempt expression evaluation or other analysis, but some expressions
+// may not behave as expected.
+func (c *Context) Eval(path addrs.ModuleInstance) (*lang.Scope, tfdiags.Diagnostics) {
+ // This is intended for external callers such as the "terraform console"
+ // command. Internally, we create an evaluator in c.walk before walking
+ // the graph, and create scopes in ContextGraphWalker.
+
+ var diags tfdiags.Diagnostics
+ defer c.acquireRun("eval")()
+
+ // Start with a copy of state so that we don't affect any instances
+ // that other methods may have already returned.
+ c.state = c.state.DeepCopy()
+ var walker *ContextGraphWalker
+
+ graph, graphDiags := c.Graph(GraphTypeEval, nil)
+ diags = diags.Append(graphDiags)
+ if !diags.HasErrors() {
+ var walkDiags tfdiags.Diagnostics
+ walker, walkDiags = c.walk(graph, walkEval)
+ diags = diags.Append(walker.NonFatalDiagnostics)
+ diags = diags.Append(walkDiags)
+ }
+
+ if walker == nil {
+ // If we skipped walking the graph (due to errors) then we'll just
+ // use a placeholder graph walker here, which'll refer to the
+ // unmodified state.
+ walker = c.graphWalker(walkEval)
+ }
+
+ // This is a bit weird since we don't normally evaluate outside of
+ // the context of a walk, but we'll "re-enter" our desired path here
+ // just to get hold of an EvalContext for it. GraphContextBuiltin
+ // caches its contexts, so we should get hold of the context that was
+ // previously used for evaluation here, unless we skipped walking.
+ evalCtx := walker.EnterPath(path)
+ return evalCtx.EvaluationScope(nil, EvalDataForNoInstanceKey), diags
}
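+
+// evalExample is an illustrative sketch (hypothetical name, added for
+// illustration) of how a console-like caller might obtain an evaluation
+// scope for the root module. What the caller then evaluates with the
+// returned *lang.Scope depends on its own expression parsing.
+func evalExample(c *Context) (*lang.Scope, tfdiags.Diagnostics) {
+ return c.Eval(addrs.RootModuleInstance)
+}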
-// Input asks for input to fill variables and provider configurations.
-// This modifies the configuration in-place, so asking for Input twice
-// may result in different UI output showing different current values.
-func (c *Context) Input(mode InputMode) error {
- defer c.acquireRun("input")()
-
- if mode&InputModeVar != 0 {
- // Walk the variables first for the root module. We walk them in
- // alphabetical order for UX reasons.
- rootConf := c.module.Config()
- names := make([]string, len(rootConf.Variables))
- m := make(map[string]*config.Variable)
- for i, v := range rootConf.Variables {
- names[i] = v.Name
- m[v.Name] = v
- }
- sort.Strings(names)
- for _, n := range names {
- // If we only care about unset variables, then if the variable
- // is set, continue on.
- if mode&InputModeVarUnset != 0 {
- if _, ok := c.variables[n]; ok {
- continue
- }
- }
-
- var valueType config.VariableType
-
- v := m[n]
- switch valueType = v.Type(); valueType {
- case config.VariableTypeUnknown:
- continue
- case config.VariableTypeMap:
- // OK
- case config.VariableTypeList:
- // OK
- case config.VariableTypeString:
- // OK
- default:
- panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
- }
-
- // If the variable is not already set, and the variable defines a
- // default, use that for the value.
- if _, ok := c.variables[n]; !ok {
- if v.Default != nil {
- c.variables[n] = v.Default.(string)
- continue
- }
- }
-
- // this should only happen during tests
- if c.uiInput == nil {
- log.Println("[WARN] Content.uiInput is nil")
- continue
- }
-
- // Ask the user for a value for this variable
- var value string
- retry := 0
- for {
- var err error
- value, err = c.uiInput.Input(&InputOpts{
- Id: fmt.Sprintf("var.%s", n),
- Query: fmt.Sprintf("var.%s", n),
- Description: v.Description,
- })
- if err != nil {
- return fmt.Errorf(
- "Error asking for %s: %s", n, err)
- }
-
- if value == "" && v.Required() {
- // Redo if it is required, but abort if we keep getting
- // blank entries
- if retry > 2 {
- return fmt.Errorf("missing required value for %q", n)
- }
- retry++
- continue
- }
-
- break
- }
-
- // no value provided, so don't set the variable at all
- if value == "" {
- continue
- }
-
- decoded, err := parseVariableAsHCL(n, value, valueType)
- if err != nil {
- return err
- }
-
- if decoded != nil {
- c.variables[n] = decoded
- }
- }
- }
-
- if mode&InputModeProvider != 0 {
- // Build the graph
- graph, err := c.Graph(GraphTypeInput, nil)
- if err != nil {
- return err
- }
-
- // Do the walk
- if _, err := c.walk(graph, nil, walkInput); err != nil {
- return err
- }
- }
-
- return nil
+// Interpolater is no longer used. Use Evaluator instead.
+//
+// The interpolator returned from this function will return an error on any use.
+func (c *Context) Interpolater() *Interpolater {
+ // FIXME: Remove this once all callers are updated to no longer use it.
+ return &Interpolater{}
}
// Apply applies the changes represented by this context and returns
// State() method. Currently the helper/resource testing framework relies
// on the absence of a returned state to determine if Destroy can be
// called, so that will need to be refactored before this can be changed.
-func (c *Context) Apply() (*State, error) {
+func (c *Context) Apply() (*states.State, tfdiags.Diagnostics) {
defer c.acquireRun("apply")()
// Copy our own state
c.state = c.state.DeepCopy()
// Build the graph.
- graph, err := c.Graph(GraphTypeApply, nil)
- if err != nil {
- return nil, err
+ graph, diags := c.Graph(GraphTypeApply, nil)
+ if diags.HasErrors() {
+ return nil, diags
}
// Determine the operation
}
// Walk the graph
- walker, err := c.walk(graph, graph, operation)
- if len(walker.ValidationErrors) > 0 {
- err = multierror.Append(err, walker.ValidationErrors...)
- }
-
- // Clean out any unused things
- c.state.prune()
-
- return c.state, err
+ walker, walkDiags := c.walk(graph, operation)
+ diags = diags.Append(walker.NonFatalDiagnostics)
+ diags = diags.Append(walkDiags)
+
+ if c.destroy && !diags.HasErrors() {
+ // If we know we were trying to destroy objects anyway, and we
+ // completed without any errors, then we'll also prune out any
+ // leftover empty resource husks (left after all of the instances
+ // of a resource with "count" or "for_each" are destroyed) to
+ // help ensure we end up with an _actually_ empty state, assuming
+ // we weren't destroying with -target here.
+ //
+ // (This doesn't actually take into account -target, but that should
+ // be okay because it doesn't throw away anything we can't recompute
+ // on a subsequent "terraform plan" run, if the resources are still
+ // present in the configuration. However, this _will_ cause "count = 0"
+ // resources to read as unknown during the next refresh walk, which
+ // may cause some additional churn if used in a data resource or
+ // provider block, until we remove refreshing as a separate walk and
+ // just do it as part of the plan walk.)
+ c.state.PruneResourceHusks()
+ }
+
+ return c.state, diags
}
// Plan generates an execution plan for the given context.
//
// Plan also updates the diff of this context to be the diff generated
// by the plan, so Apply can be called after.
-func (c *Context) Plan() (*Plan, error) {
+func (c *Context) Plan() (*plans.Plan, tfdiags.Diagnostics) {
defer c.acquireRun("plan")()
+ c.changes = plans.NewChanges()
+
+ var diags tfdiags.Diagnostics
- p := &Plan{
- Module: c.module,
- Vars: c.variables,
- State: c.state,
- Targets: c.targets,
+ varVals := make(map[string]plans.DynamicValue, len(c.variables))
+ for k, iv := range c.variables {
+ // We use cty.DynamicPseudoType here so that we'll save both the
+ // value _and_ its dynamic type in the plan, so we can recover
+ // exactly the same value later.
+ dv, err := plans.NewDynamicValue(iv.Value, cty.DynamicPseudoType)
+ if err != nil {
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ "Failed to prepare variable value for plan",
+ fmt.Sprintf("The value for variable %q could not be serialized to store in the plan: %s.", k, err),
+ ))
+ continue
+ }
+ varVals[k] = dv
+ }
+
+ p := &plans.Plan{
+ VariableValues: varVals,
+ TargetAddrs: c.targets,
+ ProviderSHA256s: c.providerSHA256s,
}
var operation walkOperation
// we replace it back with our old state.
old := c.state
if old == nil {
- c.state = &State{}
- c.state.init()
+ c.state = states.NewState()
} else {
c.state = old.DeepCopy()
}
operation = walkPlan
}
- // Setup our diff
- c.diffLock.Lock()
- c.diff = new(Diff)
- c.diff.init()
- c.diffLock.Unlock()
-
// Build the graph.
graphType := GraphTypePlan
if c.destroy {
graphType = GraphTypePlanDestroy
}
- graph, err := c.Graph(graphType, nil)
- if err != nil {
- return nil, err
+ graph, graphDiags := c.Graph(graphType, nil)
+ diags = diags.Append(graphDiags)
+ if graphDiags.HasErrors() {
+ return nil, diags
}
// Do the walk
- walker, err := c.walk(graph, graph, operation)
- if err != nil {
- return nil, err
+ walker, walkDiags := c.walk(graph, operation)
+ diags = diags.Append(walker.NonFatalDiagnostics)
+ diags = diags.Append(walkDiags)
+ if walkDiags.HasErrors() {
+ return nil, diags
}
- p.Diff = c.diff
-
- // If this is true, it means we're running unit tests. In this case,
- // we perform a deep copy just to ensure that all context tests also
- // test that a diff is copy-able. This will panic if it fails. This
- // is enabled during unit tests.
- //
- // This should never be true during production usage, but even if it is,
- // it can't do any real harm.
- if contextTestDeepCopyOnPlan {
- p.Diff.DeepCopy()
- }
-
- /*
- // We don't do the reverification during the new destroy plan because
- // it will use a different apply process.
- if X_legacyGraph {
- // Now that we have a diff, we can build the exact graph that Apply will use
- // and catch any possible cycles during the Plan phase.
- if _, err := c.Graph(GraphTypeLegacy, nil); err != nil {
- return nil, err
- }
- }
- */
+ p.Changes = c.changes
- var errs error
- if len(walker.ValidationErrors) > 0 {
- errs = multierror.Append(errs, walker.ValidationErrors...)
- }
- return p, errs
+ return p, diags
}
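+
+// planThenApplyExample is an illustrative sketch (hypothetical name, added
+// for illustration) of the usual ordering: Plan records its changes on the
+// context, so Apply can be called afterwards on the same Context.
+func planThenApplyExample(c *Context) (*states.State, tfdiags.Diagnostics) {
+ plan, diags := c.Plan()
+ if diags.HasErrors() {
+ return nil, diags
+ }
+ _ = plan // a real caller would persist or render the plan here
+ state, applyDiags := c.Apply()
+ diags = diags.Append(applyDiags)
+ return state, diags
+}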
// Refresh goes through all the resources in the state and refreshes them
//
// Even in the case an error is returned, the state may be returned and
// will potentially be partially updated.
-func (c *Context) Refresh() (*State, error) {
+func (c *Context) Refresh() (*states.State, tfdiags.Diagnostics) {
defer c.acquireRun("refresh")()
// Copy our own state
c.state = c.state.DeepCopy()
+ // Refresh builds a partial changeset as part of its work because it must
+ // create placeholder stubs for any resource instances that'll be created
+ // in a subsequent plan so that provider configurations and data resources
+ // can interpolate from them. This plan is always thrown away after
+ // the operation completes, restoring any existing changeset.
+ oldChanges := c.changes
+ defer func() { c.changes = oldChanges }()
+ c.changes = plans.NewChanges()
+
// Build the graph.
- graph, err := c.Graph(GraphTypeRefresh, nil)
- if err != nil {
- return nil, err
+ graph, diags := c.Graph(GraphTypeRefresh, nil)
+ if diags.HasErrors() {
+ return nil, diags
}
// Do the walk
- if _, err := c.walk(graph, graph, walkRefresh); err != nil {
- return nil, err
- }
-
- // Clean out any unused things
- c.state.prune()
-
- return c.state, nil
+ _, walkDiags := c.walk(graph, walkRefresh)
+ diags = diags.Append(walkDiags)
+ if walkDiags.HasErrors() {
+ return nil, diags
+ }
+
+ // During our walk we will have created planned object placeholders in
+ // state for resource instances that are in configuration but not yet
+ // created. These were created only to allow expression evaluation to
+ // work properly in provider and data blocks during the walk and must
+ // now be discarded, since a subsequent plan walk is responsible for
+ // creating these "for real".
+ // TODO: Consolidate refresh and plan into a single walk, so that the
+ // refresh walk doesn't need to emulate various aspects of the plan
+ // walk in order to properly evaluate provider and data blocks.
+ c.state.SyncWrapper().RemovePlannedResourceInstanceObjects()
+
+ return c.state, diags
}
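+
+// refreshThenPlanExample is an illustrative sketch (hypothetical name, added
+// for illustration) of the common sequence in which a refresh walk updates
+// the state before a plan is produced from it.
+func refreshThenPlanExample(c *Context) (*plans.Plan, tfdiags.Diagnostics) {
+ if _, diags := c.Refresh(); diags.HasErrors() {
+ return nil, diags
+ }
+ return c.Plan()
+}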
// Stop stops the running task.
// Grab the condition var before we exit
if cond := c.runCond; cond != nil {
+ log.Printf("[INFO] terraform: waiting for graceful stop to complete")
cond.Wait()
}
log.Printf("[WARN] terraform: stop complete")
}
-// Validate validates the configuration and returns any warnings or errors.
-func (c *Context) Validate() ([]string, []error) {
+// Validate performs semantic validation of the configuration, returning
+// any warnings or errors.
+//
+// Syntax and structural checks are performed by the configuration loader,
+// and so are not repeated here.
+func (c *Context) Validate() tfdiags.Diagnostics {
defer c.acquireRun("validate")()
- var errs error
+ var diags tfdiags.Diagnostics
- // Validate the configuration itself
- if err := c.module.Validate(); err != nil {
- errs = multierror.Append(errs, err)
- }
-
- // This only needs to be done for the root module, since inter-module
- // variables are validated in the module tree.
- if config := c.module.Config(); config != nil {
- // Validate the user variables
- if err := smcUserVariables(config, c.variables); len(err) > 0 {
- errs = multierror.Append(errs, err...)
- }
+ // Validate input variables. We do this only for the values supplied
+ // by the root module, since child module calls are validated when we
+ // visit their graph nodes.
+ if c.config != nil {
+ varDiags := checkInputVariables(c.config.Module.Variables, c.variables)
+ diags = diags.Append(varDiags)
}
- // If we have errors at this point, the graphing has no chance,
- // so just bail early.
- if errs != nil {
- return nil, []error{errs}
+ // If we have errors at this point then we probably won't be able to
+ // construct a graph without producing redundant errors, so we'll halt early.
+ if diags.HasErrors() {
+ return diags
}
// Build the graph so we can walk it and run Validate on nodes.
// We also validate the graph generated here, but this graph doesn't
// necessarily match the graph that Plan will generate, so we'll validate the
// graph again later after Planning.
- graph, err := c.Graph(GraphTypeValidate, nil)
- if err != nil {
- return nil, []error{err}
+ graph, graphDiags := c.Graph(GraphTypeValidate, nil)
+ diags = diags.Append(graphDiags)
+ if graphDiags.HasErrors() {
+ return diags
}
// Walk
- walker, err := c.walk(graph, graph, walkValidate)
- if err != nil {
- return nil, multierror.Append(errs, err).Errors
+ walker, walkDiags := c.walk(graph, walkValidate)
+ diags = diags.Append(walker.NonFatalDiagnostics)
+ diags = diags.Append(walkDiags)
+ if walkDiags.HasErrors() {
+ return diags
}
- // Return the result
- rerrs := multierror.Append(errs, walker.ValidationErrors...)
-
- sort.Strings(walker.ValidationWarnings)
- sort.Slice(rerrs.Errors, func(i, j int) bool {
- return rerrs.Errors[i].Error() < rerrs.Errors[j].Error()
- })
-
- return walker.ValidationWarnings, rerrs.Errors
+ return diags
}
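+
+// validateExample is an illustrative sketch (hypothetical name, added for
+// illustration) showing how callers consume the diagnostics-based Validate
+// result, which replaces the old ([]string, []error) return values.
+func validateExample(c *Context) error {
+ if diags := c.Validate(); diags.HasErrors() {
+ return diags.Err()
+ }
+ return nil
+}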
-// Module returns the module tree associated with this context.
-func (c *Context) Module() *module.Tree {
- return c.module
+// Config returns the configuration tree associated with this context.
+func (c *Context) Config() *configs.Config {
+ return c.config
}
// Variables will return the mapping of variables that were defined
// for this Context. If Input was called, this mapping may be different
// than what was given.
-func (c *Context) Variables() map[string]interface{} {
+func (c *Context) Variables() InputValues {
return c.variables
}
// SetVariable sets a variable after a context has already been built.
-func (c *Context) SetVariable(k string, v interface{}) {
- c.variables[k] = v
+func (c *Context) SetVariable(k string, v cty.Value) {
+ c.variables[k] = &InputValue{
+ Value: v,
+ SourceType: ValueFromCaller,
+ }
}
func (c *Context) acquireRun(phase string) func() {
// Build our lock
c.runCond = sync.NewCond(&c.l)
- // Setup debugging
- dbug.SetPhase(phase)
-
// Create a new run context
c.runContext, c.runContextCancel = context.WithCancel(context.Background())
c.l.Lock()
defer c.l.Unlock()
- // setting the phase to "INVALID" lets us easily detect if we have
- // operations happening outside of a run, or we missed setting the proper
- // phase
- dbug.SetPhase("INVALID")
-
// End our run. We check if runContext is non-nil because it can be
// set to nil if it was cancelled via Stop()
if c.runContextCancel != nil {
c.runContext = nil
}
-func (c *Context) walk(
- graph, shadow *Graph, operation walkOperation) (*ContextGraphWalker, error) {
- // Keep track of the "real" context which is the context that does
- // the real work: talking to real providers, modifying real state, etc.
- realCtx := c
-
- // If we don't want shadowing, remove it
- if !experiment.Enabled(experiment.X_shadow) {
- shadow = nil
- }
-
- // Just log this so we can see it in a debug log
- if !c.shadow {
- log.Printf("[WARN] terraform: shadow graph disabled")
- shadow = nil
- }
-
- // If we have a shadow graph, walk that as well
- var shadowCtx *Context
- var shadowCloser Shadow
- if shadow != nil {
- // Build the shadow context. In the process, override the real context
- // with the one that is wrapped so that the shadow context can verify
- // the results of the real.
- realCtx, shadowCtx, shadowCloser = newShadowContext(c)
- }
-
+func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, tfdiags.Diagnostics) {
log.Printf("[DEBUG] Starting graph walk: %s", operation.String())
- walker := &ContextGraphWalker{
- Context: realCtx,
- Operation: operation,
- StopContext: c.runContext,
- }
+ walker := c.graphWalker(operation)
// Watch for a stop so we can call the provider Stop() API.
watchStop, watchWait := c.watchStop(walker)
// Walk the real graph, this will block until it completes
- realErr := graph.Walk(walker)
+ diags := graph.Walk(walker)
// Close the channel so the watcher stops, and wait for it to return.
close(watchStop)
<-watchWait
- // If we have a shadow graph and we interrupted the real graph, then
- // we just close the shadow and never verify it. It is non-trivial to
- // recreate the exact execution state up until an interruption so this
- // isn't supported with shadows at the moment.
- if shadowCloser != nil && c.sh.Stopped() {
- // Ignore the error result, there is nothing we could care about
- shadowCloser.CloseShadow()
-
- // Set it to nil so we don't do anything
- shadowCloser = nil
- }
-
- // If we have a shadow graph, wait for that to complete.
- if shadowCloser != nil {
- // Build the graph walker for the shadow. We also wrap this in
- // a panicwrap so that panics are captured. For the shadow graph,
- // we just want panics to be normal errors rather than to crash
- // Terraform.
- shadowWalker := GraphWalkerPanicwrap(&ContextGraphWalker{
- Context: shadowCtx,
- Operation: operation,
- })
-
- // Kick off the shadow walk. This will block on any operations
- // on the real walk so it is fine to start first.
- log.Printf("[INFO] Starting shadow graph walk: %s", operation.String())
- shadowCh := make(chan error)
- go func() {
- shadowCh <- shadow.Walk(shadowWalker)
- }()
-
- // Notify the shadow that we're done
- if err := shadowCloser.CloseShadow(); err != nil {
- c.shadowErr = multierror.Append(c.shadowErr, err)
- }
-
- // Wait for the walk to end
- log.Printf("[DEBUG] Waiting for shadow graph to complete...")
- shadowWalkErr := <-shadowCh
-
- // Get any shadow errors
- if err := shadowCloser.ShadowError(); err != nil {
- c.shadowErr = multierror.Append(c.shadowErr, err)
- }
-
- // Verify the contexts (compare)
- if err := shadowContextVerify(realCtx, shadowCtx); err != nil {
- c.shadowErr = multierror.Append(c.shadowErr, err)
- }
-
- // At this point, if we're supposed to fail on error, then
- // we PANIC. Some tests just verify that there is an error,
- // so simply appending it to realErr and returning could hide
- // shadow problems.
- //
- // This must be done BEFORE appending shadowWalkErr since the
- // shadowWalkErr may include expected errors.
- //
- // We only do this if we don't have a real error. In the case of
- // a real error, we can't guarantee what nodes were and weren't
- // traversed in parallel scenarios so we can't guarantee no
- // shadow errors.
- if c.shadowErr != nil && contextFailOnShadowError && realErr == nil {
- panic(multierror.Prefix(c.shadowErr, "shadow graph:"))
- }
-
- // Now, if we have a walk error, we append that through
- if shadowWalkErr != nil {
- c.shadowErr = multierror.Append(c.shadowErr, shadowWalkErr)
- }
-
- if c.shadowErr == nil {
- log.Printf("[INFO] Shadow graph success!")
- } else {
- log.Printf("[ERROR] Shadow graph error: %s", c.shadowErr)
+ return walker, diags
+}
- // If we're supposed to fail on shadow errors, then report it
- if contextFailOnShadowError {
- realErr = multierror.Append(realErr, multierror.Prefix(
- c.shadowErr, "shadow graph:"))
- }
- }
+func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker {
+ return &ContextGraphWalker{
+ Context: c,
+ State: c.state.SyncWrapper(),
+ Changes: c.changes.SyncWrapper(),
+ Operation: operation,
+ StopContext: c.runContext,
+ RootVariableValues: c.variables,
}
-
- return walker, realErr
}
// watchStop immediately returns a `stop` and a `wait` chan after dispatching
}
// If we're here, we're stopped, trigger the call.
+ log.Printf("[TRACE] Context: requesting providers and provisioners to gracefully stop")
{
// Copy the providers so that a misbehaved blocking Stop doesn't
// completely hang Terraform.
walker.providerLock.Lock()
- ps := make([]ResourceProvider, 0, len(walker.providerCache))
+ ps := make([]providers.Interface, 0, len(walker.providerCache))
for _, p := range walker.providerCache {
ps = append(ps, p)
}
{
// Call stop on all the provisioners
walker.provisionerLock.Lock()
- ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache))
+ ps := make([]provisioners.Interface, 0, len(walker.provisionerCache))
for _, p := range walker.provisionerCache {
ps = append(ps, p)
}
panic(fmt.Errorf("unknown type %s", targetType.Printable()))
}
}
+
+// ShimLegacyState is a helper that takes the legacy state type and
+// converts it to the new state type.
+//
+// This is implemented as a state file upgrade, so it will not preserve
+// parts of the state structure that are not included in a serialized state,
+// such as the resolved results of any local values, outputs in non-root
+// modules, etc.
+func ShimLegacyState(legacy *State) (*states.State, error) {
+ if legacy == nil {
+ return nil, nil
+ }
+ var buf bytes.Buffer
+ err := WriteState(legacy, &buf)
+ if err != nil {
+ return nil, err
+ }
+ f, err := statefile.Read(&buf)
+ if err != nil {
+ return nil, err
+ }
+ return f.State, err
+}
+
+// MustShimLegacyState is a wrapper around ShimLegacyState that panics if
+// the conversion does not succeed. This is primarily intended for tests where
+// the given legacy state is an object constructed within the test.
+func MustShimLegacyState(legacy *State) *states.State {
+ ret, err := ShimLegacyState(legacy)
+ if err != nil {
+ panic(err)
+ }
+ return ret
+}
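+
+// shimLegacyStateExample is an illustrative sketch (hypothetical name, added
+// for illustration) of converting a legacy *State, as tests migrating to the
+// new state types might do.
+func shimLegacyStateExample(legacy *State) *states.State {
+ // MustShimLegacyState panics on conversion failure, which is acceptable
+ // in tests where the legacy state is constructed by hand.
+ return MustShimLegacyState(legacy)
+}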