Elixir and Ecto: A DSL for testing schemas and changesets
This post is part of the Ecto Series and the Testing Series.
“Testing Elixir: Effective and Robust Testing for Elixir and its Ecosystem” by Andrea Leopardi and Jeffrey Matthias is an excellent book on this topic.
Creating a New Test Case Template
ExUnit’s CaseTemplate
is a tool for creating a domain-specific language (DSL) tailored to a specific testing scenario.
Setting Up a ModelCase
The setup in ModelCase
acts like a DSL for testing Ecto schemas and changesets, abstracting away boilerplate to make it easier to focus on the testing logic.
Create `ModelCase` in `test/support/model_case.ex`:
defmodule GameApp.ModelCase do
  @moduledoc """
  Test case template providing a small DSL for testing Ecto schemas and
  changesets. Test modules opt in with `use GameApp.ModelCase`, which
  injects the `using` block below into the caller.
  """
  use ExUnit.CaseTemplate

  using do
    # Everything inside `quote` is injected into each test module that
    # does `use GameApp.ModelCase`, so the alias/import are reusable.
    quote do
      alias Ecto.Changeset
      import GameApp.ModelCase
    end
  end

  # Runs before every test: puts the SQL sandbox in :manual mode so each
  # test's database work is isolated.
  # NOTE(review): mode/2 alone does not check out a connection — most
  # setups also call Ecto.Adapters.SQL.Sandbox.checkout(GameApp.Repo)
  # per test; confirm against the project's test_helper.exs.
  setup _ do
    Ecto.Adapters.SQL.Sandbox.mode(GameApp.Repo, :manual)
  end
end
- `use ExUnit.CaseTemplate`: sets up the `ModelCase` module as a template for test cases using `ExUnit.CaseTemplate`.
- `quote do ... end`: the `quote` block captures the code within it as a quoted expression (an abstract syntax tree, or AST) rather than executing it immediately. This is part of Elixir's metaprogramming capabilities, allowing the code inside the `quote` block to be injected into other modules that `use GameApp.ModelCase`, making the setup reusable across multiple test modules.
- `alias Ecto.Changeset`: provides a shortcut so that `Changeset` can be referenced directly in tests.
- `import GameApp.ModelCase`: allows test modules that `use GameApp.ModelCase` to inherit the `ModelCase` helper functions.
- `setup _ do ... end`: this block runs before each test, configuring the Ecto SQL Sandbox.
- `Ecto.Adapters.SQL.Sandbox.mode(GameApp.Repo, :manual)`: sets up a sandboxed connection, isolating each test's database changes to ensure tests don't interfere with one another.
Helper Functions
Helper functions in ModelCase
are tailored to test schemas and changesets.
Assert the Schema Matches the Expected Shape
@doc """
Asserts that the Ecto-reflected fields and types of `schema_module` match
`expected_fields_with_types` (a list of `{field, type}` tuples), ignoring
order. Returns `schema_module` so the call can be piped.
"""
def assert_schema_fields_and_types(schema_module, expected_fields_with_types) do
  actual =
    :fields
    |> schema_module.__schema__()
    |> Enum.map(fn field -> {field, schema_module.__schema__(:type, field)} end)

  expected_sorted = Enum.sort(expected_fields_with_types)
  actual_sorted = Enum.sort(actual)

  assert actual_sorted == expected_sorted,
         "Expected fields and types do not match the actual schema fields and types. " <>
           "Expected: #{inspect(expected_sorted)}, " <>
           "Got: #{inspect(actual_sorted)}"

  schema_module
end
The `assert_schema_fields_and_types/2` function confirms that the test file's `@expected_fields_with_types` matches the fields and types of the module's Ecto-generated struct.
Generate Valid Parameters
The valid_params/2
function creates valid random values for testing each data type:
@doc """
Builds a map of randomly generated, castable values for the given
`fields_with_types` (a list of `{field, type}` tuples).

Entries in `overrides` replace the generated values, letting a test pin
specific fields while the rest stay random.

Raises `ArgumentError` when a type has no registered generator, so new
schema types fail loudly instead of silently producing nils.
"""
def valid_params(fields_with_types, overrides \\ %{}) do
  valid_value_by_type = %{
    # Keyed by Ecto type name. This entry was previously keyed
    # :date_of_birth, which no Ecto schema ever reports as a type, so
    # any :date field fell through to the raise below; invalid_params/2
    # already keys the same concept as :date.
    :date => fn -> to_string(Faker.Date.date_of_birth()) end,
    :id => fn -> Enum.random(1..10_000_000) end,
    :float => fn -> :rand.uniform() * 10 end,
    :string => fn -> Faker.Lorem.word() end,
    :binary_id => fn -> Ecto.UUID.generate() end,
    :integer => fn -> Enum.random(-1_000..1_000) end,
    :naive_datetime => fn -> NaiveDateTime.utc_now() end,
    :boolean => fn -> Enum.random([true, false]) end
  }

  fields_with_types
  |> Map.new(fn {field, type} ->
    case Map.fetch(valid_value_by_type, type) do
      {:ok, generate} ->
        {field, generate.()}

      :error ->
        raise ArgumentError, "No generator function defined for type: #{inspect(type)}"
    end
  end)
  |> Map.merge(overrides)
end
Generate Invalid Parameters
The invalid_params/2
function generates random invalid values for counterfactual testing.
@doc """
Builds a map of values guaranteed NOT to cast for the given
`fields_with_types` (a list of `{field, type}` tuples), for
counterfactual changeset tests. Entries in `overrides` replace the
generated values.

Raises `ArgumentError` for any type without a registered generator.
"""
def invalid_params(fields_with_types, overrides \\ %{}) do
  invalid_value_by_type = %{
    :id => fn -> Faker.Lorem.word() end,
    :date => fn -> Faker.Lorem.word() end,
    :float => fn -> Faker.Lorem.word() end,
    :string => fn -> DateTime.utc_now() end,
    :binary_id => fn -> 1 end,
    :integer => fn -> Faker.Lorem.word() end,
    :naive_datetime => fn -> Faker.Lorem.word() end,
    :boolean => fn -> Faker.Lorem.word() end
  }

  generated =
    for {field, type} <- fields_with_types, into: %{} do
      generate =
        case Map.fetch(invalid_value_by_type, type) do
          {:ok, fun} ->
            fun

          :error ->
            raise ArgumentError, "No generator function defined for type: #{inspect(type)}"
        end

      {field, generate.()}
    end

  Map.merge(generated, overrides)
end
Notice that both generators include `is_nil` error logic, which raises a helpful `ArgumentError` if you attempt to test a type that isn't defined in the generator.
Assert Changeset Works as Expected
@doc """
Asserts that each field in `fields_with_types` — except those listed in
`excluded_fields` — appears in `changeset.changes` with exactly the value
supplied in `attrs`.

`attrs` may be keyed by atoms or by strings: the tests in this suite build
params both ways, and the previous string-only lookup
(`attrs[Atom.to_string(field)]`) made atom-keyed attrs compare against
`nil`, masking real mismatches.

Returns the changeset so the call can be piped.
"""
def assert_changes_correct(changeset, attrs, fields_with_types, excluded_fields \\ []) do
  for {field, _} <- fields_with_types, field not in excluded_fields do
    actual = Map.get(changeset.changes, field)
    # Prefer the atom key; fall back to the equivalent string key.
    expected = Map.get(attrs, field, Map.get(attrs, Atom.to_string(field)))

    assert actual == expected,
           "Values did not match for field: #{field}\nexpected: #{inspect(expected)}\nactual: #{inspect(actual)}"
  end

  changeset
end
The function assert_changes_correct/4
confirms that passing valid fields to our changeset does not produce unexpected errors.
Assert Invalid Changeset Errors
@doc """
Asserts that `changeset` is invalid and that every required field (those
in `fields_with_types` but not in `optional_fields`) carries the expected
error. Per-field checking is delegated to `generate_error_message/2`,
which is assumed to return a list of problem descriptions (empty when the
field's errors are correct). Returns the changeset so the call can be
piped.
"""
def assert_invalid_changeset_errors(changeset, fields_with_types, optional_fields) do
  assert %Ecto.Changeset{valid?: false, errors: errors} = changeset

  problems =
    fields_with_types
    |> Enum.reject(fn {field, _} -> field in optional_fields end)
    |> Enum.flat_map(fn {field, _} -> generate_error_message(field, errors[field]) end)

  assert problems == [],
         "Errors encountered:\n" <> Enum.join(problems, "\n")

  changeset
end
The assert_invalid_changeset_errors/3
function is the counterfactual, verifying that error messages are present and correct, ensuring that no invalid data slips through unnoticed.
Assert has errors
@doc """
Asserts that `changeset.errors` contains at least one entry for each of
the given `fields`. Returns the changeset so the call can be piped.
"""
@spec assert_has_errors(Ecto.Changeset.t(), [atom()]) :: Ecto.Changeset.t()
def assert_has_errors(changeset, fields) do
  for field <- fields do
    found? =
      Enum.any?(changeset.errors, fn
        {key, _} -> key == field
        _ -> false
      end)

    assert found?, "Expected error for field #{inspect(field)} but none found"
  end

  changeset
end
The assert_has_errors/2
helper function confirms errors are present when expected.
The Test File
Typically, we avoid including logic in our test files, as it can obscure the reasons behind test failures. However, we make an exception for Ecto schemas and changesets to reduce boilerplate.
defmodule GameApp.Accounts.PlayerTest do
  # Pulls in the schema/changeset DSL: the ModelCase helpers plus the
  # per-test SQL sandbox setup.
  use GameApp.ModelCase

  alias GameApp.Accounts.Player

  # Allows running only these tests with `mix test --only player`.
  @moduletag :player

  # The shape the Player schema is expected to expose; verified by
  # assert_schema_fields_and_types/2 and used to generate test params.
  @expected_fields_with_types [
    {:id, :id},
    {:name, :string},
    {:email, :string},
    {:score, :integer},
    {:inserted_at, :naive_datetime},
    {:updated_at, :naive_datetime}
  ]

  # Auto-generated fields: never cast by the changesets, so excluded from
  # change-equality assertions.
  @excluded [:id, :inserted_at, :updated_at]
end
- @expected_fields_with_types: A list of tuples specifying expected fields and their types, used to verify schema structure.
- @excluded: Fields excluded from certain tests, typically auto-generated values like IDs and timestamps.
Test Schema
Verify the Player
schema aligns with the expected values:
# Lives inside GameApp.Accounts.PlayerTest (shown standalone in the article).
describe "player schema" do
  @tag :schema
  test "has the correct fields and types" do
    # Fails with a sorted diff of expected vs. actual {field, type} pairs,
    # giving an early signal when the schema drifts.
    assert_schema_fields_and_types(Player, @expected_fields_with_types)
  end
end
This is a quick test to ensure that the Player
schema matches our @expected_fields_with_types
, giving us an early and clear indication of any discrepancies.
Testing the Affirmative
describe "create_changeset/1" do
  @tag :changeset
  test "success: returns a valid changeset when given valid arguments" do
    # valid_params/1 returns an atom-keyed map, so the overrides must use
    # atom keys too — mixing atom and string keys in the same params map
    # makes Ecto.Changeset.cast/4 raise Ecto.CastError before any
    # assertion runs. The original used "email"/"score" string keys here.
    attrs =
      valid_params(@expected_fields_with_types)
      |> Map.put(:email, Faker.Internet.email())
      |> Map.put(:score, Faker.random_between(0, 100_000))

    changeset = Player.create_changeset(attrs)

    assert changeset.valid?, "Expected changeset to be valid"

    # Auto-generated fields (@excluded) are not expected in the changes.
    changeset
    |> assert_changes_correct(attrs, @expected_fields_with_types, @excluded)
  end
end
This test confirms that create_changeset/1
correctly handles valid inputs, validating the business logic for email and score constraints. It also ensures that the expected fields are correctly included in the changeset.
Testing the Counterfactual
Ensure the changeset correctly handles invalid conditions.
Add assert_invalid_changeset_errors/3
Insert a test in the create_changeset/1
block to handle invalid inputs effectively:
# Lives inside the describe "create_changeset/1" block.
test "failure: cannot cast invalid values" do
  # Every field receives a value of the wrong type (see invalid_params/2).
  attrs = invalid_params(@expected_fields_with_types)

  changeset = Player.create_changeset(attrs)

  refute changeset.valid?, "Expected changeset to be invalid due to uncastable values"
  # Auto-generated fields (@excluded) are not required to carry errors.
  assert_invalid_changeset_errors(changeset, @expected_fields_with_types, @excluded)
end
This test ensures that the changeset correctly identifies and rejects invalid values.
Test the Business Logic
Create Changeset
Verify different scenarios to ensure the business logic holds true:
- The initial `score` can be missing, but not negative.
- The `email` must be valid.
# Lives inside the describe "create_changeset/1" block.
test "failure: score cannot be negative" do
  negative_score = Faker.random_between(-100_000, -1)

  attrs =
    valid_params(@expected_fields_with_types)
    |> Map.put(:email, Faker.Internet.email())
    |> Map.put(:score, negative_score)

  # The function under test is create_changeset/1 (see the describe
  # block and earlier tests); the original passed an extra %Player{}
  # struct, calling a create_changeset/2 that is not defined.
  changeset = Player.create_changeset(attrs)

  refute changeset.valid?, "Expected changeset to be invalid due to negative score"
  assert_has_errors(changeset, [:score])
end
# Lives inside the describe "create_changeset/1" block.
test "failure: email must include an '@' sign" do
  # A bare word can never contain an '@'.
  invalid_email = Faker.Lorem.word()

  attrs =
    valid_params(@expected_fields_with_types)
    |> Map.put(:email, invalid_email)
    |> Map.put(:score, Faker.random_between(0, 100_000))

  # create_changeset/1 is the function under test; the original passed an
  # extra %Player{} struct, calling a create_changeset/2 that is not defined.
  changeset = Player.create_changeset(attrs)

  refute changeset.valid?, "Expected changeset to be invalid due to incorrect email format"
  assert_has_errors(changeset, [:email])
end
# Lives inside the describe "create_changeset/1" block.
test "success: missing score is valid" do
  # valid_params/1 keys the map by atoms, so :score (not "score") must be
  # deleted to actually remove the generated value.
  attrs =
    valid_params(@expected_fields_with_types)
    |> Map.put(:email, Faker.Internet.email())
    |> Map.delete(:score)

  # create_changeset/1 is the function under test; the original passed an
  # extra %Player{} struct, calling a create_changeset/2 that is not defined.
  changeset = Player.create_changeset(attrs)

  assert changeset.valid?, "Expected changeset to be valid despite missing score"

  changeset
  |> assert_changes_correct(attrs, @expected_fields_with_types, @excluded)
end
Update Changeset
The update_changeset/2
validates business constraints:
- The `email` field must not be updated.
- The `score` must not be decremented.
describe "update_changeset/2" do
  @tag :changeset

  setup do
    # A persisted-looking player with a known score to test against.
    player = %Player{name: Faker.Person.name(), email: Faker.Internet.email(), score: 10}
    {:ok, player: player}
  end

  test "success: returns a valid changeset when given valid arguments", %{player: player} do
    increment_score = player.score + 10

    # valid_params/1 returns an atom-keyed map, so the overrides must use
    # atom keys too — mixing atom and string keys makes
    # Ecto.Changeset.cast/4 raise Ecto.CastError. The original used
    # "name"/"score" string keys here.
    attrs =
      valid_params(@expected_fields_with_types)
      |> Map.put(:name, Faker.Person.name())
      |> Map.put(:score, increment_score)

    changeset = Player.update_changeset(player, attrs)

    assert changeset.valid?, "Expected changeset to be valid"
    # :email must never change on update, so it is excluded alongside the
    # auto-generated fields.
    assert_changes_correct(changeset, attrs, @expected_fields_with_types, @excluded ++ [:email])
  end

  test "failure: cannot cast invalid values", %{player: player} do
    attrs = invalid_params(@expected_fields_with_types)

    changeset = Player.update_changeset(player, attrs)

    refute changeset.valid?, "Expected changeset to be invalid due to uncastable values"

    # :email is not castable on update, so no error is expected for it.
    assert_invalid_changeset_errors(
      changeset,
      @expected_fields_with_types,
      @excluded ++ [:email]
    )
  end

  test "failure: cannot decrement a score", %{player: player} do
    decrement_score = player.score - 1

    # Atom key (:score) so the override replaces the generated value
    # instead of creating a mixed-key map that cast/4 rejects.
    attrs =
      valid_params(@expected_fields_with_types)
      |> Map.put(:score, decrement_score)

    changeset = Player.update_changeset(player, attrs)

    refute changeset.valid?, "Expected changeset to be invalid due to score decrement"
    assert_has_errors(changeset, [:score])
  end
end
Conclusion
Notice that now the boilerplate has been moved out into the ModelCase
, the tests for update_changeset/2
are easy to understand and maintain. If the player
schema adds a new field, such as nickname
, the only thing that needs to be updated is to add nickname
to @expected_fields_with_types
and verify it acts as expected within the business rules.
It’s a valid point that testing too many internals of a library like Ecto might not always be necessary or beneficial. Generally, you want your tests to focus on the business logic specific to your application rather than re-testing well-established library functionality. However, there are cases where testing certain internals might be justified, especially if you have custom validations or business rules that interact closely with Ecto’s features.