Fri, Jan 9, 2026 Latest

Blob inclusion

Analysis of blob inclusion patterns in Ethereum mainnet blocks.

Blobs included per slot

Each dot represents a slot, colored by the number of blobs included (0–9). This shows the temporal distribution of blob activity—gaps indicate missed slots; blocks that included no blobs still appear as dots at zero.

View query
-- One row per canonical mainnet slot on the target day, with the number of
-- blob sidecars included in that slot (0 when the block carried no blobs).
SELECT
    s.slot AS slot,
    s.slot_start_date_time AS time,
    -- LEFT JOIN leaves NULL for slots with no sidecars; report them as 0.
    COALESCE(b.blob_count, 0) AS blob_count
FROM (
    -- All canonical slots in the one-day window; DISTINCT guards against
    -- duplicate rows for the same slot in the source table.
    SELECT DISTINCT slot, slot_start_date_time
    FROM default.canonical_beacon_block
    WHERE meta_network_name = 'mainnet'
      AND slot_start_date_time >= '2026-01-09' AND slot_start_date_time < '2026-01-09'::date + INTERVAL 1 DAY
) s
LEFT JOIN (
    -- Sidecar rows per slot; one row per blob, so COUNT(*) is the blob count.
    SELECT
        slot,
        COUNT(*) AS blob_count
    FROM default.canonical_beacon_blob_sidecar
    WHERE meta_network_name = 'mainnet'
      AND slot_start_date_time >= '2026-01-09' AND slot_start_date_time < '2026-01-09'::date + INTERVAL 1 DAY
    GROUP BY slot
) b ON s.slot = b.slot
ORDER BY s.slot ASC
Show code
# Per-slot blob counts for the target day, one row per canonical slot.
df_blobs_per_slot = load_parquet("blobs_per_slot", target_date)

# One dot per slot; the blob count drives both the y position and the colour.
scatter = px.scatter(
    df_blobs_per_slot,
    x="time",
    y="blob_count",
    color="blob_count",
    color_continuous_scale="YlOrRd",
    labels={"time": "Time", "blob_count": "Blob Count", "slot": "Slot"},
    template="plotly",
    # "slot" is listed first so it lands in customdata[0] for the hover template.
    hover_data={"slot": True, "time": True, "blob_count": True},
)
scatter.update_traces(
    marker={"size": 3},
    hovertemplate="<b>Slot:</b> %{customdata[0]}<br><b>Time:</b> %{x}<br><b>Blob Count:</b> %{y}<extra></extra>",
)
scatter.update_layout(
    margin={"l": 60, "r": 30, "t": 30, "b": 60},
    autosize=True,
    showlegend=False,
    yaxis={"dtick": 1, "range": [-0.5, 15.5], "title": {"standoff": 10}},
    xaxis={"title": {"standoff": 15}},
    coloraxis_colorbar={"title": {"text": "Blobs", "side": "right"}},
    height=600,
)
scatter.show(config={"responsive": True})

Blob count breakdown per epoch

Stacked bar chart showing how blocks within each epoch are distributed by blob count. Each bar represents one epoch (32 slots), with colors indicating the number of blobs in each block.

View query
-- Per-epoch histogram of blocks by blob count. Every (epoch, blob_count)
-- combination is materialised so the stacked-bar chart has no missing series.
WITH blob_counts_per_slot AS (
    SELECT
        slot,
        epoch,
        epoch_start_date_time,
        slot_start_date_time,
        -- Blob count derived from the highest blob index; assumes indices are
        -- contiguous from 0 within a slot — TODO confirm against table docs.
        toUInt64(max(blob_index) + 1) as blob_count
    FROM canonical_beacon_blob_sidecar
    WHERE meta_network_name = 'mainnet'
      AND slot_start_date_time >= '2026-01-09' AND slot_start_date_time < '2026-01-09'::date + INTERVAL 1 DAY
    GROUP BY slot, epoch, epoch_start_date_time, slot_start_date_time
),
-- Total canonical blocks per epoch (denominator for the zero-blob bucket).
blocks_per_epoch AS (
    SELECT
        epoch,
        epoch_start_date_time,
        toUInt64(COUNT(*)) as total_blocks
    FROM canonical_beacon_block
    WHERE meta_network_name = 'mainnet'
      AND slot_start_date_time >= '2026-01-09' AND slot_start_date_time < '2026-01-09'::date + INTERVAL 1 DAY
    GROUP BY epoch, epoch_start_date_time
),
-- Distinct epochs present in the window.
epochs AS (
    SELECT DISTINCT epoch, epoch_start_date_time
    FROM blocks_per_epoch
),
-- Every blob count from 0 up to the maximum observed, so empty buckets
-- still get rows (and hence a legend entry with count 0).
all_blob_counts AS (
    SELECT arrayJoin(range(toUInt64(0), toUInt64(max(blob_count) + 1))) AS blob_count
    FROM blob_counts_per_slot
),
-- Cartesian product epoch x blob_count: the full grid to fill in.
all_combinations AS (
    SELECT
        e.epoch,
        e.epoch_start_date_time,
        b.blob_count
    FROM epochs e
    CROSS JOIN all_blob_counts b
),
-- Observed counts: blocks per (epoch, blob_count); only covers blocks that
-- actually had sidecars (blob_count >= 1).
block_per_blob_count_per_epoch AS (
    SELECT
        epoch,
        epoch_start_date_time,
        blob_count,
        toUInt64(COUNT(*)) as block_count
    FROM blob_counts_per_slot
    GROUP BY epoch, epoch_start_date_time, blob_count
),
-- Blocks per epoch that carried at least one blob.
blocks_with_blobs_per_epoch AS (
    SELECT
        epoch,
        toUInt64(COUNT(*)) as blocks_with_blobs
    FROM blob_counts_per_slot
    GROUP BY epoch
)
SELECT
    a.epoch AS epoch,
    a.epoch_start_date_time AS time,
    a.blob_count AS blob_count,
    CASE
        -- Zero-blob blocks never appear in the sidecar table, so derive them
        -- as total blocks minus blocks that had blobs.
        WHEN a.blob_count = 0 THEN
            toInt64(COALESCE(blk.total_blocks, toUInt64(0))) - toInt64(COALESCE(wb.blocks_with_blobs, toUInt64(0)))
        ELSE
            toInt64(COALESCE(b.block_count, toUInt64(0)))
    END as block_count
FROM all_combinations a
GLOBAL LEFT JOIN block_per_blob_count_per_epoch b
    ON a.epoch = b.epoch AND a.blob_count = b.blob_count
GLOBAL LEFT JOIN blocks_per_epoch blk
    ON a.epoch = blk.epoch
GLOBAL LEFT JOIN blocks_with_blobs_per_epoch wb
    ON a.epoch = wb.epoch
ORDER BY a.epoch ASC, a.blob_count ASC
Show code
# Per-epoch block counts broken down by blob count (one row per bucket).
df_blocks_blob_epoch = load_parquet("blocks_blob_epoch", target_date)

# Zero-padded label ("03 blobs") so lexicographic sort matches numeric order.
df_blocks_blob_epoch["series"] = df_blocks_blob_epoch["blob_count"].apply(
    lambda count: "%02d blobs" % int(count)
)

hover_fields = [
    alt.Tooltip("time:T", title="Epoch Time"),
    alt.Tooltip("epoch:Q", title="Epoch"),
    alt.Tooltip("series:N", title="Blob Count"),
    alt.Tooltip("block_count:Q", title="Block Count"),
]

series_color = alt.Color(
    "series:N",
    sort="ascending",
    scale=alt.Scale(scheme="inferno"),
)

# An epoch holds 32 slots, so each stacked bar is capped at 32 blocks.
stack_y = alt.Y(
    "block_count:Q",
    stack="zero",
    scale=alt.Scale(domain=[0, 32]),
    axis=alt.Axis(tickCount=32, labelAngle=-45),
)

chart = (
    alt.Chart(df_blocks_blob_epoch)
    .mark_bar()
    .encode(
        x=alt.X("time:T"),
        y=stack_y,
        color=series_color,
        order=alt.Order("series:N", sort="ascending"),
        tooltip=hover_fields,
    )
    .properties(height=600, width=800)
)
chart

Blob count popularity per epoch

Heatmap showing which blob counts are most common over time. Brighter cells indicate more blocks with that blob count during the epoch. Useful for spotting trends in blob usage patterns.

View query
-- Heatmap input: number of blocks per (epoch, blob_count). Blocks with no
-- sidecars land in the blob_count = 0 bucket via the LEFT JOIN + COALESCE.
WITH blob_counts_per_slot AS (
    SELECT
        slot_start_date_time,
        epoch_start_date_time,
        -- Blob count from the highest blob index; assumes indices are
        -- contiguous from 0 within a slot — TODO confirm against table docs.
        toUInt64(max(blob_index) + 1) as blob_count
    FROM canonical_beacon_blob_sidecar
    WHERE meta_network_name = 'mainnet'
      AND slot_start_date_time >= '2026-01-09' AND slot_start_date_time < '2026-01-09'::date + INTERVAL 1 DAY
    GROUP BY slot, slot_start_date_time, epoch_start_date_time
),
-- All canonical blocks in the window (including blob-less ones).
blocks AS (
    SELECT
        epoch,
        slot_start_date_time,
        epoch_start_date_time
    FROM canonical_beacon_block
    WHERE meta_network_name = 'mainnet'
      AND slot_start_date_time >= '2026-01-09' AND slot_start_date_time < '2026-01-09'::date + INTERVAL 1 DAY
),
-- Attach the blob count to each block; slot_start_date_time uniquely
-- identifies the slot, so it serves as the join key.
blocks_with_blob_count AS (
    SELECT
        b.epoch,
        b.epoch_start_date_time as time,
        COALESCE(bc.blob_count, toUInt64(0)) as blob_count
    FROM blocks b
    GLOBAL LEFT JOIN blob_counts_per_slot bc ON b.slot_start_date_time = bc.slot_start_date_time
)
SELECT
    epoch,
    time,
    blob_count,
    COUNT(*) as count
FROM blocks_with_blob_count
GROUP BY epoch, time, blob_count
ORDER BY epoch ASC, blob_count ASC
Show code
# Blocks per (epoch, blob_count) for the target day.
df_blob_popularity = load_parquet("blob_popularity", target_date)

# Rows = blob counts, columns = epoch start times; absent cells mean zero blocks.
popularity_grid = df_blob_popularity.pivot(index="blob_count", columns="time", values="count").fillna(0)

# Map each epoch start time to its epoch number for the hover text.
time_to_epoch = df_blob_popularity.drop_duplicates(subset=["time"]).set_index("time")["epoch"].to_dict()

# One epoch number per heatmap cell, mirroring the z-matrix layout.
epoch_numbers = [
    [time_to_epoch.get(t, "") for t in popularity_grid.columns]
    for _ in popularity_grid.index
]

heatmap = go.Heatmap(
    z=popularity_grid.values,
    x=popularity_grid.columns,
    y=[str(int(b)) for b in popularity_grid.index],
    colorscale="inferno",
    reversescale=False,
    colorbar=dict(title="Block Count"),
    customdata=epoch_numbers,
    hovertemplate="<b>Epoch Time:</b> %{x}<br><b>Epoch:</b> %{customdata}<br><b>Blob Count:</b> %{y}<br><b>Block Count:</b> %{z}<extra></extra>",
)

fig = go.Figure(data=heatmap)
fig.update_layout(
    template="plotly_white",
    title="Blob count popularity by epoch",
    xaxis_title="Epoch Start Time",
    yaxis_title="Blob Count",
    height=500,
)
fig.show()

Blob count per slot (vertical layout)

Detailed view of blob counts for each slot position (0–31) within epochs. Each row is an epoch, each column is a slot position. Reveals patterns like whether certain slot positions consistently have more or fewer blobs.

View query
-- Blob count for every canonical block, keyed by its position within the
-- epoch (slot_in_epoch = slot - epoch * 32, i.e. 0-31).
WITH blob_counts_per_slot AS (
    SELECT
        slot,
        epoch,
        epoch_start_date_time,
        -- Blob count from the highest blob index; assumes indices are
        -- contiguous from 0 within a slot — TODO confirm against table docs.
        toUInt64(max(blob_index) + 1) as blob_count
    FROM canonical_beacon_blob_sidecar
    WHERE meta_network_name = 'mainnet'
      AND slot_start_date_time >= '2026-01-09' AND slot_start_date_time < '2026-01-09'::date + INTERVAL 1 DAY
    GROUP BY slot, epoch, epoch_start_date_time
),
-- All canonical blocks in the window (including blob-less ones).
blocks AS (
    SELECT
        slot,
        epoch,
        epoch_start_date_time
    FROM canonical_beacon_block
    WHERE meta_network_name = 'mainnet'
      AND slot_start_date_time >= '2026-01-09' AND slot_start_date_time < '2026-01-09'::date + INTERVAL 1 DAY
),
-- Attach the blob count to each block; blocks without sidecars get 0.
blocks_with_blob_count AS (
    SELECT
        b.slot,
        b.epoch,
        b.epoch_start_date_time,
        -- Position of the slot inside its 32-slot epoch (0-31).
        b.slot - (b.epoch * 32) as slot_in_epoch,
        COALESCE(bc.blob_count, toUInt64(0)) as blob_count
    FROM blocks b
    GLOBAL LEFT JOIN blob_counts_per_slot bc
        ON b.slot = bc.slot AND b.epoch = bc.epoch
)
SELECT
    slot,
    epoch,
    epoch_start_date_time as time,
    slot_in_epoch,
    blob_count
FROM blocks_with_blob_count
ORDER BY epoch ASC, slot_in_epoch ASC
Show code
# One row per canonical slot with its blob count and 0-31 position in epoch.
# NOTE: keep this name — the horizontal-layout cell below reuses it.
df_slot_in_epoch = load_parquet("slot_in_epoch", target_date)

# Slot positions (0-31) as rows, epoch start times as columns.
df_pivot = df_slot_in_epoch.pivot(index="slot_in_epoch", columns="time", values="blob_count").fillna(0)

# Matching grid of absolute slot numbers for the hover text.
df_slot_pivot = df_slot_in_epoch.pivot(index="slot_in_epoch", columns="time", values="slot").fillna(0)

# Map each epoch start time to its epoch number for the hover text.
epoch_lookup = df_slot_in_epoch.drop_duplicates(subset=["time"]).set_index("time")["epoch"].to_dict()

# Per-cell [slot, epoch] hover payload; rows follow the transposed z-matrix
# (one row per epoch, one column per slot position).
epoch_rows = [
    [epoch_lookup.get(t, "") for _ in df_pivot.index]
    for t in df_pivot.columns
]
customdata = np.dstack([df_slot_pivot.values.T, epoch_rows])

horizontal_bar = dict(
    orientation="h",
    y=-0.075,
    yanchor="top",
    x=0,
    xanchor="left",
    len=1,
)

fig = go.Figure(
    data=go.Heatmap(
        z=df_pivot.values.T,
        x=[str(int(s)) for s in df_pivot.index],
        y=df_pivot.columns,
        colorscale="thermal",
        reversescale=True,
        colorbar=horizontal_bar,
        customdata=customdata,
        hovertemplate="<b>Slot:</b> %{customdata[0]:.0f}<br><b>Epoch:</b> %{customdata[1]}<br><b>Slot in Epoch:</b> %{x}<br><b>Blob Count:</b> %{z}<extra></extra>",
    )
)
fig.update_layout(
    title="Blob count per slot within epoch",
    xaxis_title="Slot in Epoch",
    yaxis_title="Epoch Start Time",
    yaxis=dict(autorange="reversed"),
    height=1500,
    width=500,
)
fig.show()

Blob count per slot (horizontal layout)

Same data as above but arranged horizontally for easier comparison across longer time ranges. Epochs are stacked in columns, making it easier to see temporal evolution.

Show code
# Same data as the vertical layout (reuses df_slot_in_epoch loaded above),
# re-pivoted: slot positions as rows, epoch start times as columns.
df_pivot = df_slot_in_epoch.pivot(index="slot_in_epoch", columns="time", values="blob_count").fillna(0)
df_slot_pivot = df_slot_in_epoch.pivot(index="slot_in_epoch", columns="time", values="slot").fillna(0)
epoch_lookup = df_slot_in_epoch.drop_duplicates(subset=["time"]).set_index("time")["epoch"].to_dict()

# Parameters
n_columns = 4
n_rows = len(df_pivot.columns)
# Ceiling division: floor division (n_rows // n_columns) silently dropped the
# trailing n_rows % n_columns epochs from the chart whenever the epoch count
# was not an exact multiple of n_columns.
rows_per_chunk = -(-n_rows // n_columns)

# Reshape: split the epoch axis into n_columns chunks, to be laid side by side.
chunks = []
slot_chunks = []
for i in range(n_columns):
    # iloc slicing past the end just yields a shorter (or empty) final chunk.
    chunk = df_pivot.T.iloc[i * rows_per_chunk:(i + 1) * rows_per_chunk, :]
    chunks.append(chunk.reset_index(drop=True))

    slot_chunk = df_slot_pivot.T.iloc[i * rows_per_chunk:(i + 1) * rows_per_chunk, :]
    slot_chunks.append(slot_chunk.reset_index(drop=True))

# Concatenate horizontally (side by side); a shorter final chunk pads with NaN.
df_combined = pd.concat(chunks, axis=1, ignore_index=True)
df_slot_combined = pd.concat(slot_chunks, axis=1, ignore_index=True)

# Build the epoch-number array matching the combined layout for hover text.
epoch_combined = []
for row_idx in range(rows_per_chunk):
    epoch_row = []
    for chunk_idx in range(n_columns):
        time_idx = chunk_idx * rows_per_chunk + row_idx
        if time_idx < len(df_pivot.columns):
            time_val = df_pivot.columns[time_idx]
            epoch_row.extend([epoch_lookup.get(time_val, "")] * len(df_pivot.index))
        else:
            # Padding cells past the last epoch get an empty hover label.
            epoch_row.extend([""] * len(df_pivot.index))
    epoch_combined.append(epoch_row)

# Per-cell [slot, epoch] hover payload.
customdata = np.dstack([df_slot_combined.values, epoch_combined])

# X-axis repeats the 0..n_slots-1 positions once per chunk.
n_slots = len(df_pivot.index)
x_labels = list(range(n_slots)) * n_columns

# Y labels show the epoch times of the FIRST chunk only (one label per row).
y_labels = []
for row_idx in range(rows_per_chunk):
    time_val = df_pivot.columns[row_idx]
    y_labels.append(str(time_val))

fig = go.Figure(
    data=go.Heatmap(
        z=df_combined.values,
        x=list(range(len(df_combined.columns))),
        y=y_labels,
        colorscale="thermal",
        reversescale=True,
        colorbar=dict(
            orientation="h",
            y=-0.15,
            yanchor="top",
            x=0.5,
            xanchor="center",
            len=0.8,
        ),
        customdata=customdata,
        hovertemplate="<b>Slot:</b> %{customdata[0]:.0f}<br><b>Epoch:</b> %{customdata[1]}<br><b>Slot in Epoch:</b> %{x}<br><b>Blob Count:</b> %{z}<extra></extra>",
    )
)

# Add vertical dividers between chunks
for i in range(1, n_columns):
    fig.add_vline(
        x=i * n_slots - 0.5,
        line_width=2,
        line_color="white",
    )

fig.update_layout(
    title="Blob count per slot within epoch",
    xaxis_title="Slot in Epoch",
    yaxis_title="Epoch",
    yaxis=dict(autorange="reversed"),
    xaxis=dict(
        tickvals=list(range(len(df_combined.columns))),
        ticktext=[str(i % n_slots) for i in range(len(df_combined.columns))],
        tickangle=90,
        tickfont=dict(size=6),
    ),
    height=600,
    width=1200,
)

fig.show()