Serialization
Encoding atoms for storage and transmission.
Overview
Atoms are designed for type-segregated storage. This cookbook shows patterns for encoding atoms to various formats.
JSON Encoding
Direct Marshaling
Atoms can be marshaled directly to JSON:
import "encoding/json"
// Atomize a typed value, then marshal the resulting atom like any struct.
atomizer, _ := atom.Use[User]() // NOTE(review): error dropped for brevity — handle it in real code
user := &User{Name: "Alice", Age: 30}
a := atomizer.Atomize(user)
data, err := json.Marshal(a)
if err != nil {
log.Fatal(err)
}
// {"Strings":{"Name":"Alice"},"Ints":{"Age":30},...}
Unmarshaling
// Decode into an empty Atom, then rebuild the typed value from it.
var restored atom.Atom
if err := json.Unmarshal(data, &restored); err != nil {
log.Fatal(err)
}
user, err := atomizer.Deatomize(&restored)
Compact JSON
For smaller payloads, shorten the JSON key names and omit empty maps:
// CompactAtom mirrors atom.Atom but uses single-letter JSON keys and
// omitempty so that empty tables are dropped from the encoded payload.
type CompactAtom struct {
// NOTE(review): `omitempty` has no effect on non-pointer struct fields,
// so Spec is always emitted even when zero. Use *atom.Spec if it should
// be omitted as well — confirm against atom.Spec's definition.
Spec atom.Spec `json:"spec,omitempty"`
Strings map[string]string `json:"s,omitempty"`
Ints map[string]int64 `json:"i,omitempty"`
Floats map[string]float64 `json:"f,omitempty"`
Bools map[string]bool `json:"b,omitempty"`
// ... other fields
}
// ToCompact copies the tables of a into the compact JSON representation.
// The maps are shared with a, not cloned, so the result aliases a's data.
func ToCompact(a *atom.Atom) CompactAtom {
	var c CompactAtom
	c.Spec = a.Spec
	c.Strings = a.Strings
	c.Ints = a.Ints
	c.Floats = a.Floats
	c.Bools = a.Bools
	return c
}
MessagePack
For binary efficiency:
import "github.com/vmihailenco/msgpack/v5"
// Encode
data, err := msgpack.Marshal(a)
// Decode
var restored atom.Atom
err = msgpack.Unmarshal(data, &restored)
Protocol Buffers
Define a proto schema:
// Wire representation of an atom: one map per value type, keyed by
// field name. Field numbers must stay stable once published.
message Atom {
map<string, string> strings = 1;
map<string, int64> ints = 2;
map<string, double> floats = 3;
map<string, bool> bools = 4;
map<string, bytes> bytes = 5;
// Nested atoms allow arbitrarily deep structures.
map<string, Atom> nested = 6;
// ... etc
}
Convert between atom and proto:
// ToProto copies an atom's tables into the generated protobuf message.
// Map references are shared with a rather than deep-copied.
func ToProto(a *atom.Atom) *pb.Atom {
	msg := new(pb.Atom)
	msg.Strings = a.Strings
	msg.Ints = a.Ints
	msg.Floats = a.Floats
	msg.Bools = a.Bools
	// ... remaining tables copied the same way
	return msg
}
// FromProto rebuilds an atom from its protobuf message. Map references
// are shared with p rather than deep-copied.
func FromProto(p *pb.Atom) *atom.Atom {
	out := new(atom.Atom)
	out.Strings = p.Strings
	out.Ints = p.Ints
	out.Floats = p.Floats
	out.Bools = p.Bools
	// ... remaining tables copied the same way
	return out
}
Key-Value Storage
Redis Hashes
Store each table as a hash:
import "github.com/redis/go-redis/v9"
// hashArgs flattens a field→value table into the alternating
// field, value, field, value … argument list that HSet expects.
func hashArgs[V any](table map[string]V) []any {
	args := make([]any, 0, len(table)*2)
	for field, value := range table {
		args = append(args, field, value)
	}
	return args
}

// StoreAtom writes each non-empty table of a to its own Redis hash,
// keyed as "<key>:strings", "<key>:ints", and so on. All writes go
// through a single pipeline, so only one round trip is made; the
// returned error is the pipeline's execution error, if any.
func StoreAtom(ctx context.Context, rdb *redis.Client, key string, a *atom.Atom) error {
	pipe := rdb.Pipeline()
	// Store strings
	if len(a.Strings) > 0 {
		pipe.HSet(ctx, key+":strings", hashArgs(a.Strings)...)
	}
	// Store ints
	if len(a.Ints) > 0 {
		pipe.HSet(ctx, key+":ints", hashArgs(a.Ints)...)
	}
	// ... other tables
	_, err := pipe.Exec(ctx)
	return err
}
Partial Reads
Read only specific fields:
// LoadStringField fetches one string field from the "<key>:strings"
// hash without loading the rest of the atom.
func LoadStringField(ctx context.Context, rdb *redis.Client, key, field string) (string, error) {
	cmd := rdb.HGet(ctx, key+":strings", field)
	return cmd.Result()
}
// LoadIntField fetches one int64 field from the "<key>:ints" hash
// without loading the rest of the atom.
func LoadIntField(ctx context.Context, rdb *redis.Client, key, field string) (int64, error) {
	cmd := rdb.HGet(ctx, key+":ints", field)
	return cmd.Int64()
}
Column-Oriented Storage
Per-Type Tables
Store each atom table in a separate database table:
-- One row per (entity, field); the composite primary key makes
-- point lookups and upserts cheap.
CREATE TABLE atom_strings (
entity_id TEXT,
field_name TEXT,
value TEXT,
PRIMARY KEY (entity_id, field_name)
);
-- Same layout for 64-bit integer fields.
CREATE TABLE atom_ints (
entity_id TEXT,
field_name TEXT,
value BIGINT,
PRIMARY KEY (entity_id, field_name)
);
// StoreAtom upserts every string and int field of a into the per-type
// tables, all within one transaction. Any failure rolls the whole
// transaction back and is returned to the caller.
func StoreAtom(db *sql.DB, entityID string, a *atom.Atom) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	// Rolls back on any early return; a no-op after a successful Commit.
	defer tx.Rollback()
	for field, value := range a.Strings {
		// ON CONFLICT DO UPDATE requires an explicit conflict target
		// (SQLite and PostgreSQL both reject it otherwise); excluded
		// refers to the row that failed to insert.
		if _, err := tx.Exec(`INSERT INTO atom_strings VALUES (?, ?, ?)
            ON CONFLICT (entity_id, field_name) DO UPDATE SET value = excluded.value`,
			entityID, field, value); err != nil {
			return err
		}
	}
	for field, value := range a.Ints {
		if _, err := tx.Exec(`INSERT INTO atom_ints VALUES (?, ?, ?)
            ON CONFLICT (entity_id, field_name) DO UPDATE SET value = excluded.value`,
			entityID, field, value); err != nil {
			return err
		}
	}
	return tx.Commit()
}
Querying by Field
-- Find users over 30
SELECT entity_id FROM atom_ints
WHERE field_name = 'Age' AND value > 30;
-- Find users by name
SELECT entity_id FROM atom_strings
WHERE field_name = 'Name' AND value = 'Alice';
Binary Format
Custom binary encoding for maximum efficiency:
import (
	"bytes"
	"encoding/binary"
	"sort"
)
// EncodeAtom serializes the string and int tables of a into a compact
// little-endian binary layout: for each table, a uint32 entry count
// followed by length-prefixed keys and values. Keys are written in
// sorted order so the encoding is deterministic — Go map iteration
// order is random, which would otherwise yield a different byte
// stream on every call for the same atom.
func EncodeAtom(a *atom.Atom) []byte {
	buf := new(bytes.Buffer)
	// Writes to a bytes.Buffer cannot fail, so binary.Write errors
	// are safely ignored here.
	// Write string count and entries
	binary.Write(buf, binary.LittleEndian, uint32(len(a.Strings)))
	for _, k := range sortedKeys(a.Strings) {
		writeString(buf, k)
		writeString(buf, a.Strings[k])
	}
	// Write int count and entries
	binary.Write(buf, binary.LittleEndian, uint32(len(a.Ints)))
	for _, k := range sortedKeys(a.Ints) {
		writeString(buf, k)
		binary.Write(buf, binary.LittleEndian, a.Ints[k])
	}
	// ... other tables
	return buf.Bytes()
}

// sortedKeys returns the keys of m in ascending order.
func sortedKeys[V any](m map[string]V) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}
// writeString writes s as a little-endian uint32 byte length followed
// by the raw bytes of s.
func writeString(buf *bytes.Buffer, s string) {
	var length [4]byte
	binary.LittleEndian.PutUint32(length[:], uint32(len(s)))
	buf.Write(length[:])
	buf.WriteString(s)
}
Streaming
For large datasets, stream atoms:
// AtomStream appends atoms to an underlying JSON stream, one document
// per atom (JSON Lines when the encoder writes to a file).
type AtomStream struct {
	encoder *json.Encoder
}

// Write encodes a as a single JSON document on the stream, returning
// any encoding error.
func (as *AtomStream) Write(a *atom.Atom) error {
	return as.encoder.Encode(a)
}
// Usage: one JSON document per line, suitable for very large datasets.
file, _ := os.Create("atoms.jsonl") // NOTE(review): errors dropped for brevity — handle in real code
stream := &AtomStream{encoder: json.NewEncoder(file)}
for _, user := range users {
a := atomizer.Atomize(user)
stream.Write(a)
}
Compression
Combine with compression:
import "compress/gzip"
// CompressAtom JSON-encodes a and gzips the result in memory,
// returning the compressed bytes.
func CompressAtom(a *atom.Atom) ([]byte, error) {
	var buf bytes.Buffer
	gz := gzip.NewWriter(&buf)
	if err := json.NewEncoder(gz).Encode(a); err != nil {
		gz.Close()
		return nil, err
	}
	// Close flushes remaining compressed data; its error must be
	// checked, or truncated output could be returned as success.
	if err := gz.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
Best Practices
Version Your Format
// VersionedAtom wraps an Atom together with a format version number so
// the stored encoding can evolve without breaking existing readers.
type VersionedAtom struct {
Version int `json:"v"`
Atom atom.Atom `json:"a"`
}
// Encode wraps a in a VersionedAtom at the current format version and
// returns its JSON encoding.
func Encode(a *atom.Atom) []byte {
	wrapped := VersionedAtom{Version: 1, Atom: *a}
	// NOTE(review): the Marshal error is deliberately dropped to keep
	// the example signature simple — confirm atoms can never contain
	// values json.Marshal rejects (e.g. NaN floats).
	out, _ := json.Marshal(wrapped)
	return out
}
Handle Missing Fields
// LoadAtom decodes a JSON-encoded atom and guarantees its map tables
// are non-nil, so callers can index into them immediately.
func LoadAtom(data []byte) (*atom.Atom, error) {
	restored := new(atom.Atom)
	if err := json.Unmarshal(data, restored); err != nil {
		return nil, err
	}
	// Replace nil maps left by the decoder with empty ones.
	if restored.Strings == nil {
		restored.Strings = map[string]string{}
	}
	// ...
	return restored, nil
}
Next Steps
- Migrations Cookbook - Schema evolution
- API Reference - Complete API