Schema Migrations
Handling evolving data structures.
Overview
When struct definitions change, atoms serialized with old schemas need migration. This cookbook covers common migration patterns.
Field Renames
Old Schema
// User is the v1 schema: the email is stored under the
// EmailAddress field name.
type User struct {
EmailAddress string // v1
}
New Schema
// User is the v2 schema: the field was renamed to Email, so atoms
// written under v1 must be migrated on read.
type User struct {
Email string // v2: renamed from EmailAddress
}
Migration
Using Deatomizable:
// Deatomize fills in the User from an atom, accepting either the
// current "Email" key or the legacy v1 "EmailAddress" key. If neither
// key is present, Email is left at its zero value.
func (u *User) Deatomize(a *atom.Atom) error {
	// Prefer the current field name; fall back to the legacy one.
	email, ok := a.Strings["Email"]
	if !ok {
		email, ok = a.Strings["EmailAddress"]
	}
	if ok {
		u.Email = email
	}
	return nil
}
// Atomize serializes the User, always emitting the current (v2)
// field name so newly written atoms never carry the legacy key.
func (u *User) Atomize(a *atom.Atom) {
// Always write new name
a.Strings["Email"] = u.Email
}
Field Type Changes
Old: String ID
// User is the v1 schema: the ID is stored as a decimal string.
type User struct {
ID string // v1: string ID
}
New: Integer ID
// User is the v2 schema: the ID became numeric, so v1 atoms need a
// string-to-int64 conversion on read.
type User struct {
ID int64 // v2: numeric ID
}
Migration
// Deatomize restores a User from an atom, migrating the ID field from
// the v1 string encoding to the v2 int64 encoding when necessary.
// It returns an error only when a legacy string ID fails to parse.
func (u *User) Deatomize(a *atom.Atom) error {
	// Try the new numeric type first.
	if id, ok := a.Ints["ID"]; ok {
		u.ID = id
		return nil
	}
	// Fall back to the old string format and convert it.
	if idStr, ok := a.Strings["ID"]; ok {
		id, err := strconv.ParseInt(idStr, 10, 64)
		if err != nil {
			// Wrap the parse error so callers can inspect it with
			// errors.Is/errors.As; %q makes empty/odd values visible.
			return fmt.Errorf("invalid ID format %q: %w", idStr, err)
		}
		u.ID = id
	}
	return nil
}
New Required Fields
Adding with Default
// User is the v2 schema, which adds the Active flag on top of v1's
// Name-only layout.
type User struct {
Name string
Active bool // v2: new field
}
// Deatomize restores a User, defaulting the Active flag (added in v2)
// to true for atoms written before the field existed.
func (u *User) Deatomize(a *atom.Atom) error {
	u.Name = a.Strings["Name"]
	// Old atoms carry no "Active" entry; treat them as active, then
	// let an explicit stored value override the default.
	u.Active = true
	if active, ok := a.Bools["Active"]; ok {
		u.Active = active
	}
	return nil
}
Removed Fields
Graceful Ignore
Old atoms may have fields that no longer exist:
// Old schema had "MiddleName" field
// New schema removed it
// Deatomize simply doesn't read it
// Deatomize reads only the fields the current schema knows about;
// extra keys in old atoms (e.g. the removed MiddleName) are simply
// never looked up, so stale data is ignored without any migration code.
func (u *User) Deatomize(a *atom.Atom) error {
u.FirstName = a.Strings["FirstName"]
u.LastName = a.Strings["LastName"]
// MiddleName is ignored if present
return nil
}
Nested Structure Changes
Flattening
// Old: nested
// UserV1 is the old nested layout: Bio lives inside an anonymous
// Profile struct.
type UserV1 struct {
Profile struct {
Bio string
}
}
// New: flattened
// UserV2 is the flattened layout: Bio was hoisted to the top level.
type UserV2 struct {
Bio string
}
// Deatomize reads Bio from the flattened v2 layout, falling back to
// the nested v1 Profile atom when the flat key is absent.
func (u *UserV2) Deatomize(a *atom.Atom) error {
	bio, ok := a.Strings["Bio"]
	if !ok {
		// Legacy layout: Bio lives inside the nested Profile atom.
		if profile, found := a.Nested["Profile"]; found {
			bio = profile.Strings["Bio"]
			ok = true
		}
	}
	if ok {
		u.Bio = bio
	}
	return nil
}
Nesting
// Old: flat
// UserV1 is the old flat layout: address parts sit directly on the user.
type UserV1 struct {
Street string
City string
}
// New: nested
// UserV2 is the nested layout: address parts moved into an Address value.
type UserV2 struct {
Address Address
}
// Deatomize reads the address, accepting both the nested v2 layout
// (an "Address" sub-atom) and the flat v1 layout (Street/City keys at
// the top level).
func (u *UserV2) Deatomize(a *atom.Atom) error {
	addr, nested := a.Nested["Address"]
	if !nested {
		// Old flat format: address fields live on the root atom.
		u.Address.Street = a.Strings["Street"]
		u.Address.City = a.Strings["City"]
		return nil
	}
	// New format: address fields live in the nested atom.
	u.Address.Street = addr.Strings["Street"]
	u.Address.City = addr.Strings["City"]
	return nil
}
Versioned Atoms
Explicit Version Field
// VersionedAtom pairs a serialized atom with the schema version it was
// written under, so migrations can be applied explicitly and in order.
type VersionedAtom struct {
Version int
Data atom.Atom
}
// Migrate upgrades a versioned atom to the current (v3) schema by
// running each migration step the stored version still needs, in order.
// Versions outside the known 1..3 range are rejected.
func Migrate(va VersionedAtom) (*atom.Atom, error) {
	if va.Version < 1 || va.Version > 3 {
		return nil, fmt.Errorf("unknown version: %d", va.Version)
	}
	a := &va.Data
	// Each step brings the atom up exactly one version; a v1 atom
	// passes through both, a v3 atom through neither.
	if va.Version < 2 {
		migrateV1ToV2(a)
	}
	if va.Version < 3 {
		migrateV2ToV3(a)
	}
	return a, nil
}
// migrateV1ToV2 renames the v1 "EmailAddress" key to "Email" in place.
// Atoms already in the v2 layout are left untouched.
func migrateV1ToV2(a *atom.Atom) {
	email, ok := a.Strings["EmailAddress"]
	if !ok {
		return
	}
	a.Strings["Email"] = email
	delete(a.Strings, "EmailAddress")
}
Version in Spec
Use the Spec field for version tracking:
// User is a minimal schema used to show version tracking via Spec.
type User struct {
Name string
}
// Build an atomizer for User and serialize a value; the resulting
// atom's Spec carries type information that can host version metadata.
atomizer, _ := atom.Use[User]() // NOTE(review): error ignored for brevity — handle it in real code
a := atomizer.Atomize(&User{Name: "Alice"})
// Spec contains type info
// Can be extended with version metadata
Batch Migration
Migrate Stored Data
// MigrateDatabase rewrites every stored atom in the atoms table through
// the v1→v2 migration: each row is read, migrated in memory, and
// written back. It stops at the first failing row and reports which
// atom could not be processed.
func MigrateDatabase(db *sql.DB) error {
	rows, err := db.Query("SELECT id, data FROM atoms")
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var id string
		var data []byte
		if err := rows.Scan(&id, &data); err != nil {
			return fmt.Errorf("scanning atom row: %w", err)
		}
		var a atom.Atom
		if err := json.Unmarshal(data, &a); err != nil {
			return fmt.Errorf("decoding atom %s: %w", id, err)
		}
		// Apply migrations
		migrateV1ToV2(&a)
		newData, err := json.Marshal(a)
		if err != nil {
			return fmt.Errorf("encoding atom %s: %w", id, err)
		}
		// NOTE(review): some drivers disallow Exec while rows is still
		// open on the same connection — confirm, or collect updates and
		// apply them after the loop.
		if _, err := db.Exec("UPDATE atoms SET data = ? WHERE id = ?", newData, id); err != nil {
			return fmt.Errorf("updating atom %s: %w", id, err)
		}
	}
	// Surface any error encountered during iteration itself.
	return rows.Err()
}
Lazy Migration
Migrate on read:
// LoadUser fetches a stored atom by id, lazily migrating it from the
// v1 schema on first read, and deatomizes it into a User.
func LoadUser(id string) (*User, error) {
	data := storage.Get(id)
	var a atom.Atom
	if err := json.Unmarshal(data, &a); err != nil {
		return nil, fmt.Errorf("decoding user %s: %w", id, err)
	}
	// The presence of the old key means this atom predates v2.
	if _, ok := a.Strings["EmailAddress"]; ok {
		migrateV1ToV2(&a)
		// Persist the migrated form so the next read skips this path.
		newData, err := json.Marshal(a)
		if err != nil {
			return nil, fmt.Errorf("encoding migrated user %s: %w", id, err)
		}
		storage.Set(id, newData)
	}
	return atomizer.Deatomize(&a)
}
Testing Migrations
// TestMigrationV1ToV2 checks that migrating a v1 atom renames the
// EmailAddress key to Email and drops the old key.
func TestMigrationV1ToV2(t *testing.T) {
	old := &atom.Atom{
		Strings: map[string]string{"EmailAddress": "alice@example.com"},
	}

	migrateV1ToV2(old)

	if got := old.Strings["Email"]; got != "alice@example.com" {
		t.Error("Email not migrated")
	}
	if _, stillThere := old.Strings["EmailAddress"]; stillThere {
		t.Error("Old field not removed")
	}
}
// TestBackwardsCompatibility verifies that Deatomize still understands
// atoms serialized under the v1 field names.
func TestBackwardsCompatibility(t *testing.T) {
	atomizer, err := atom.Use[User]()
	if err != nil {
		t.Fatal(err)
	}
	// Load old format
	oldData := `{"Strings":{"EmailAddress":"alice@example.com"}}`
	var a atom.Atom
	if err := json.Unmarshal([]byte(oldData), &a); err != nil {
		t.Fatal(err)
	}
	// Deatomize should handle old format
	user, err := atomizer.Deatomize(&a)
	if err != nil {
		t.Fatal(err)
	}
	if user.Email != "alice@example.com" {
		t.Errorf("expected alice@example.com, got %s", user.Email)
	}
}
Best Practices
Backwards Compatibility
Always support reading old formats:
// Deatomize is the backwards-compatibility skeleton: a migration-aware
// reader supports every historical encoding of the struct.
func (u *User) Deatomize(a *atom.Atom) error {
	// Support both old and new field names
	// Support both old and new types
	// Default missing fields appropriately
	return nil // original omitted the return, which does not compile
}
Forward Compatibility
Ignore unknown fields when reading:
// Future versions may add fields
// Current code should not fail on unknown fields
Document Versions
// Version history:
// v1 (2024-01): Initial schema
// v2 (2024-03): Renamed EmailAddress → Email
// v3 (2024-06): Added Active field (default: true)
Next Steps
- API Reference - Complete API
- Tables Reference - All table types