
FOMO300K, FOMO60K datasets of brain scans

Download the datasets on Hugging Face. They are described in the paper "A large-scale heterogeneous 3D magnetic resonance brain imaging dataset for self-supervised learning".
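
For convenience, a minimal sketch for pulling a dataset with huggingface_hub (the repo id below is a placeholder; copy the real one from the dataset page):

from huggingface_hub import snapshot_download

# NOTE: repo_id is hypothetical -- use the id from the Hugging Face dataset page
snapshot_download(
    repo_id="FOMO-MRI/FOMO60K",
    repo_type="dataset",
    local_dir="FOMO60K",
)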


FOMO60K

FOMO60K is a subset of FOMO300K that includes 60,529 MRI scans collected from 13,900 MRI sessions across 11,187 subjects, aggregated from 16 publicly available datasets. In contrast to FOMO300K, all scans in FOMO60K were affinely co-registered within each session to the image with the highest spatial resolution. Additionally, each scan was either skull-stripped or defaced (details provided below). Table 3 of the paper summarizes the source datasets, including the number of subjects, sessions, and scans, as well as the MRI sequence types, applied preprocessing steps, and dataset licenses.

The preprocessing pipeline for FOMO60K consisted of three main stages: (1) reorienting images to RAS orientation (as performed in FOMO300K), (2) affine co-registration, and (3) skull stripping. First, all scans were reoriented to RAS and affinely co-registered using the mri_coreg command from FreeSurfer 7.4.1, with default parameters. Within each MRI session, scans were aligned to the image with the highest spatial resolution in order to preserve maximal anatomical detail.
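For reference, a minimal sketch of steps (1) and (2) in Python, assuming FreeSurfer 7.4.1 is on PATH (file names are placeholders):

import subprocess
import nibabel as nib

def reorient_to_ras(in_path: str, out_path: str) -> None:
    # Step 1: reorient to the closest RAS-oriented canonical form
    img = nib.load(in_path)
    nib.save(nib.as_closest_canonical(img), out_path)

def coregister(moving: str, reference: str, out_reg: str) -> None:
    # Step 2: affine co-registration with FreeSurfer's mri_coreg
    # (default parameters); the reference is the session's
    # highest-resolution scan
    subprocess.run(
        ["mri_coreg", "--mov", moving, "--ref", reference, "--reg", out_reg],
        check=True,
    )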


FOMO300K

Some scans are skull-stripped, some are not.

After downloading and unzipping the archives into the original file structure, the script below:

  1. Picks one scan per dataset (PT001, PT002, etc.)
  2. Loads and plots a middle slice
  3. Arranges the slices in a grid so you can visually inspect which datasets have skulls, artifacts, etc.
"""
FOMO300K Skull Strip Visual Inspector
--------------------------------------
Samples one scan per dataset (PTxxx_DatasetName) from mapping.tsv,
loads a middle axial slice, and plots a grid so you can visually
identify which datasets have skulls vs. are skull-stripped/defaced.

Usage:
    python visualize_skull_check.py \
        --fomo_root /path/to/FOMO300K \
        --mapping   /path/to/FOMO300K/mapping.tsv \
        --out_dir   ./skull_check_plots

    # Optional: only plot specific PT datasets
    python visualize_skull_check.py \
        --fomo_root /path/to/FOMO300K \
        --mapping   /path/to/FOMO300K/mapping.tsv \
        --out_dir   ./skull_check_plots \
        --datasets PT001 PT002 PT005
"""

import argparse
import math
import random
from pathlib import Path

import numpy as np
import pandas as pd
import nibabel as nib
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec


# ── helpers ──────────────────────────────────────────────────────────────────

def load_middle_slice(nii_path: Path):
    """Return the middle axial slice of a 3-D NIfTI (or first vol of 4-D)."""
    img = nib.load(str(nii_path))

    # Reorient to RAS so the axial direction is the last axis
    img_ras = nib.as_closest_canonical(img)
    data = img_ras.get_fdata(dtype=np.float32)

    # Handle 4-D volumes (take first volume)
    if data.ndim == 4:
        data = data[..., 0]

    mid = data.shape[2] // 2
    return data[:, :, mid]


def norm(slc: np.ndarray) -> np.ndarray:
    """Normalise slice to [0, 1] for display (2-98 percentile window)."""
    pos = slc[slc > 0]
    if pos.size == 0:          # empty or all-non-positive slice
        return np.zeros_like(slc)
    p2, p98 = np.percentile(pos, [2, 98])
    rng = p98 - p2
    if rng == 0:
        return np.zeros_like(slc)
    return (np.clip(slc, p2, p98) - p2) / rng


# ── main ─────────────────────────────────────────────────────────────────────

def main():
    parser = argparse.ArgumentParser(description="Visual skull-strip checker for FOMO300K")
    parser.add_argument("--fomo_root", required=True, help="Root directory of FOMO300K")
    parser.add_argument("--mapping",   required=True, help="Path to mapping.tsv")
    parser.add_argument("--out_dir",   default="./skull_check_plots", help="Output directory for plots")
    parser.add_argument("--datasets",  nargs="*", default=None,
                        help="Subset of dataset prefixes to inspect, e.g. PT001 PT002. Default: all.")
    parser.add_argument("--seed",      type=int, default=42, help="Random seed for scan sampling")
    parser.add_argument("--cols",      type=int, default=6,  help="Number of columns in the output grid")
    args = parser.parse_args()

    random.seed(args.seed)
    fomo_root = Path(args.fomo_root)
    out_dir   = Path(args.out_dir)
    out_dir.mkdir(parents=True, exist_ok=True)

    # ── load mapping ──────────────────────────────────────────────────────────
    print(f"Loading mapping from: {args.mapping}")
    df = pd.read_csv(args.mapping, sep="\t", dtype=str)

    # Normalise column names (strip whitespace)
    df.columns = df.columns.str.strip()

    required = {"dataset", "new_path"}
    missing = required - set(df.columns)
    if missing:
        raise ValueError(f"mapping.tsv is missing columns: {missing}. Found: {list(df.columns)}")

    # ── filter to requested datasets ──────────────────────────────────────────
    all_datasets = sorted(df["dataset"].unique())
    print(f"Found {len(all_datasets)} datasets in mapping.tsv")

    if args.datasets:
        # Allow matching by prefix (e.g. "PT001") or full name
        keep = []
        for ds in all_datasets:
            for pat in args.datasets:
                if ds == pat or ds.startswith(pat):
                    keep.append(ds)
                    break
        datasets = sorted(set(keep))
        print(f"Filtered to {len(datasets)} datasets: {datasets}")
    else:
        datasets = all_datasets

    if not datasets:
        raise ValueError("No datasets matched. Check --datasets argument.")

    # ── sample one scan per dataset ───────────────────────────────────────────
    samples = {}   # dataset_name -> (nii_path, new_path_str)

    for ds in datasets:
        sub = df[df["dataset"] == ds].copy()

        # Prefer T1w / anat scans for clearest skull visibility
        t1_mask = sub["new_path"].str.contains("T1w|t1w|T1|mprage|MPRAGE", na=False)
        sub_t1 = sub[t1_mask]
        pool = sub_t1 if len(sub_t1) > 0 else sub

        row = pool.sample(1, random_state=args.seed).iloc[0]
        rel_path = row["new_path"]   # e.g. sub-01/ses-01/anat/sub-01_ses-01_T1w.nii.gz
        nii_path = fomo_root / ds / rel_path

        samples[ds] = (nii_path, rel_path)

    # ── load slices ───────────────────────────────────────────────────────────
    print(f"\nLoading {len(samples)} scans …")
    slices   = {}   # ds -> np array or None
    statuses = {}   # ds -> "ok" | "missing" | "error"

    for ds, (nii_path, rel) in samples.items():
        if not nii_path.exists():
            print(f"  [MISSING]  {ds}: {nii_path}")
            statuses[ds] = "missing"
            slices[ds]   = None
            continue
        try:
            slc = load_middle_slice(nii_path)
            slices[ds]   = norm(slc)
            statuses[ds] = "ok"
            print(f"  [OK]       {ds}: {nii_path.name}  shape={slc.shape}")
        except Exception as e:
            print(f"  [ERROR]    {ds}: {e}")
            statuses[ds] = f"error: {e}"
            slices[ds]   = None

    # ── plot grid ─────────────────────────────────────────────────────────────
    n      = len(datasets)
    ncols  = args.cols
    nrows  = math.ceil(n / ncols)

    fig_w  = ncols * 3.2
    fig_h  = nrows * 3.5
    fig    = plt.figure(figsize=(fig_w, fig_h), facecolor="#0d0d0d")
    fig.suptitle(
        "FOMO300K · Middle Axial Slice per Dataset\n"
        "(Visual check: with skull = skull visible, defaced = face region blanked, skull-stripped = brain only)",
        color="white", fontsize=11, y=0.995, va="top"
    )

    gs = gridspec.GridSpec(nrows, ncols, figure=fig, hspace=0.45, wspace=0.15)

    for idx, ds in enumerate(datasets):
        row_i = idx // ncols
        col_i = idx % ncols
        ax    = fig.add_subplot(gs[row_i, col_i])
        ax.set_facecolor("#0d0d0d")

        slc = slices[ds]
        if slc is not None:
            ax.imshow(np.rot90(slc), cmap="gray", vmin=0, vmax=1, aspect="equal",
                      interpolation="nearest")
            # Derive a short label: "PT001\nClevelandCCF"
            parts = ds.split("_", 1)
            label = f"{parts[0]}\n{parts[1]}" if len(parts) == 2 else ds
            ax.set_title(label, color="white", fontsize=6.5, pad=2, wrap=True)
        else:
            ax.text(0.5, 0.5, statuses[ds], color="red", fontsize=6,
                    ha="center", va="center", transform=ax.transAxes, wrap=True)
            parts = ds.split("_", 1)
            label = f"{parts[0]}\n{parts[1]}" if len(parts) == 2 else ds
            ax.set_title(label, color="#888", fontsize=6.5, pad=2)

        ax.axis("off")

    # Hide empty cells
    for idx in range(n, nrows * ncols):
        fig.add_subplot(gs[idx // ncols, idx % ncols]).set_visible(False)

    out_path = out_dir / "skull_check_all_datasets.png"
    fig.savefig(str(out_path), dpi=130, bbox_inches="tight",
                facecolor=fig.get_facecolor())
    plt.close(fig)
    print(f"\nSaved grid plot → {out_path}")

    # ── also save a per-scan summary TSV ─────────────────────────────────────
    rows = []
    for ds, (nii_path, rel) in samples.items():
        rows.append({
            "dataset":    ds,
            "sampled_scan": rel,
            "full_path":  str(nii_path),
            "status":     statuses[ds],
        })
    summary_df = pd.DataFrame(rows)
    summary_path = out_dir / "sampled_scans.tsv"
    summary_df.to_csv(str(summary_path), sep="\t", index=False)
    print(f"Saved sample summary  → {summary_path}")
    print("\nDone. Open the PNG to visually identify skull-stripped datasets.")


if __name__ == "__main__":
    main()

The following are skull-stripped:

PT009 BraTS-GEN

PT015 MSD_BrainTumor

PT023 Infant_Development_Brain

PT025 MGH_Wild

PT030 OpenNeuro/ds000022
PT030 OpenNeuro/ds001110
PT030 OpenNeuro/ds001235
PT030 OpenNeuro/ds001339
PT030 OpenNeuro/ds001534
PT030 OpenNeuro/ds001551
PT030 OpenNeuro/ds001832
PT030 OpenNeuro/ds001882
PT030 OpenNeuro/ds002011
PT030 OpenNeuro/ds002076
PT030 OpenNeuro/ds002672
PT030 OpenNeuro/ds002675
PT030 OpenNeuro/ds002748
PT030 OpenNeuro/ds002995
PT030 OpenNeuro/ds003007
PT030 OpenNeuro/ds003340
PT030 OpenNeuro/ds003367
PT030 OpenNeuro/ds003511
PT030 OpenNeuro/ds003716
PT030 OpenNeuro/ds003777
PT030 OpenNeuro/ds003835
PT030 OpenNeuro/ds003972
PT030 OpenNeuro/ds004054
PT030 OpenNeuro/ds004187
PT030 OpenNeuro/ds004286
PT030 OpenNeuro/ds004312
PT030 OpenNeuro/ds004553
PT030 OpenNeuro/ds004564
PT030 OpenNeuro/ds004648
PT030 OpenNeuro/ds004666
PT030 OpenNeuro/ds004692
PT030 OpenNeuro/ds004710
PT030 OpenNeuro/ds004993
PT030 OpenNeuro/ds006188

PT035 Yale_Brain_Mets_Longitudinal

The following do not have full skulls:

PT026 MICA_MICs

PT030 OpenNeuro/ds000228

PT030 OpenNeuro/ds000229

PT030 OpenNeuro/ds001168

PT030 OpenNeuro/ds002606

PT007 ATAG contains files like sub-04_ses-01_run-1_T2starw.nii.gz that look anomalous upon inspection.
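
A quick way to spot-check a single file like that one (a sketch; the sub-path below is illustrative and may differ in your download):

import nibabel as nib
import numpy as np
import matplotlib.pyplot as plt

# Illustrative path -- adjust to your local layout
path = "FOMO300K/PT007_ATAG/sub-04/ses-01/anat/sub-04_ses-01_run-1_T2starw.nii.gz"
img  = nib.as_closest_canonical(nib.load(path))
data = img.get_fdata(dtype=np.float32)
plt.imshow(np.rot90(data[:, :, data.shape[2] // 2]), cmap="gray")
plt.axis("off")
plt.savefig("atag_spot_check.png", dpi=130, bbox_inches="tight")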

See also: Physics-Informed Neural Networks for Brain Molecular Transport from MRI.

Check also the following before including them in the training data:

PT030 OpenNeuro/ds001912, PT002 Nigerian_Clinical, PT030 OpenNeuro/ds002367, PT030 OpenNeuro/ds003466, PT030 OpenNeuro/ds003763, PT030 OpenNeuro/ds003798, PT030 OpenNeuro/ds003836, PT030 OpenNeuro/ds003949, PT030 OpenNeuro/ds003967, PT030 OpenNeuro/ds003990, PT030 OpenNeuro/ds004798, PT030 OpenNeuro/ds004889, PT030 OpenNeuro/ds005205, PT030 OpenNeuro/ds005075, PT030 OpenNeuro/ds005138, PT030 OpenNeuro/ds005576
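
One way to eyeball these is to rerun the inspector above restricted to the relevant prefixes (prefix matching means PT030 selects its OpenNeuro sub-datasets, assuming the ds-ID appears in the dataset column):

python visualize_skull_check.py \
    --fomo_root /path/to/FOMO300K \
    --mapping   /path/to/FOMO300K/mapping.tsv \
    --out_dir   ./check_also_plots \
    --datasets  PT002 PT030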

Some of the heads are a bit rotated. For example, PT030 OpenNeuro/ds001984, PT030 OpenNeuro/ds002006, PT030 OpenNeuro/ds002155, PT030 OpenNeuro/ds002711, PT030 OpenNeuro/ds002715, …
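
A rough heuristic to flag such scans, assuming the rotation is recorded in the NIfTI affine (it will not be if the header was reset), is to measure how far the voxel axes deviate from the world axes:

import numpy as np
import nibabel as nib

def max_axis_tilt_deg(nii_path: str) -> float:
    # Direction cosines: columns of the affine normalised to unit length
    aff  = nib.load(nii_path).affine[:3, :3]
    dirs = aff / np.linalg.norm(aff, axis=0, keepdims=True)
    # Angle between each voxel axis and its closest world axis
    # (~0 degrees for an unrotated scan)
    cos = np.abs(dirs).max(axis=0)
    return float(np.degrees(np.arccos(np.clip(cos, 0.0, 1.0))).max())

# e.g. flag anything above ~10 degrees (threshold is arbitrary)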

Script to copy the filtered data to a new folder:

"""
FOMO300K - Copy scans WITH skulls, 3D only, no DWI, to processed directory
---------------------------------------------------------------------------
Applies three filters using mapping.tsv + mri_info.tsv:
  1. Exclude skull-stripped / defaced datasets (visual inspection list)
  2. Exclude DWI scans   (path contains /dwi/ OR filename matches dwi pattern)
  3. Keep only 3D scans  (MRAcquisitionType == "3D" in mri_info.tsv;
                          scans missing from mri_info are kept by default
                          unless --strict_3d flag is set)
    # Also drop scans with no MRAcquisitionType metadata entry

                          
Usage:
    # Dry run first (no files copied, just prints counts)
python copy_with_skulls.py \
    --fomo_root FOMO300k/raw \
    --mapping   FOMO300k/raw/mapping.tsv \
    --mri_info  FOMO300k/raw/mri_info.tsv \
    --out_root  FOMO300k/cultivated \
    --dry_run \
    --strict_3d

    # Actual copy with 16 parallel threads
python copy_with_skulls.py \
    --fomo_root FOMO300k/raw \
    --mapping   FOMO300k/raw/mapping.tsv \
    --mri_info  FOMO300k/raw/mri_info.tsv \
    --out_root  FOMO300k/cultivated \
    --n_workers 16 \
    --strict_3d
        

"""

import argparse
import shutil
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path

import pandas as pd


# ── Skull-stripped datasets to EXCLUDE ───────────────────────────────────────

SKULL_STRIPPED_EXACT = {
    "PT009_BraTS-GEN",
    "PT015_MSD_BrainTumor",
    "PT023_Infant_Development_Brain",
    "PT025_MGH_Wild",
    "PT026_MICA_MICs",
    "PT002_Nigerian_Clinical",
    # OpenNeuro sub-datasets where ds-ID is already in the dataset column
    "PT030_OpenNeuro_ds000022",
    "PT030_OpenNeuro_ds000228",
    "PT030_OpenNeuro_ds000229",
    "PT030_OpenNeuro_ds001110",
    "PT030_OpenNeuro_ds001168",
    "PT030_OpenNeuro_ds001235",
    "PT030_OpenNeuro_ds001339",
    "PT030_OpenNeuro_ds001534",
    "PT030_OpenNeuro_ds001551",
    "PT030_OpenNeuro_ds001832",
    "PT030_OpenNeuro_ds001882",
    "PT030_OpenNeuro_ds001912",
    "PT030_OpenNeuro_ds002076",
    "PT030_OpenNeuro_ds002201",
    "PT030_OpenNeuro_ds002367",
    "PT030_OpenNeuro_ds002606",
    "PT030_OpenNeuro_ds002672",
    "PT030_OpenNeuro_ds002675",
    "PT030_OpenNeuro_ds002748",
    "PT030_OpenNeuro_ds002995",
    "PT030_OpenNeuro_ds003007",
    "PT030_OpenNeuro_ds003340",
    "PT030_OpenNeuro_ds003367",
    "PT030_OpenNeuro_ds003466",
    "PT030_OpenNeuro_ds003511",
    "PT030_OpenNeuro_ds003716",
    "PT030_OpenNeuro_ds003763",
    "PT030_OpenNeuro_ds003777",
    "PT030_OpenNeuro_ds003798",
    "PT030_OpenNeuro_ds003835",
    "PT030_OpenNeuro_ds003836",
    "PT030_OpenNeuro_ds003949",
    "PT030_OpenNeuro_ds003967",
    "PT030_OpenNeuro_ds003972",
    "PT030_OpenNeuro_ds003990",
    "PT030_OpenNeuro_ds004054",
    "PT030_OpenNeuro_ds004187",
    "PT030_OpenNeuro_ds004286",
    "PT030_OpenNeuro_ds004312",
    "PT030_OpenNeuro_ds004553",
    "PT030_OpenNeuro_ds004564",
    "PT030_OpenNeuro_ds004648",
    "PT030_OpenNeuro_ds004666",
    "PT030_OpenNeuro_ds004692",
    "PT030_OpenNeuro_ds004710",
    "PT030_OpenNeuro_ds004798",
    "PT030_OpenNeuro_ds004889",
    "PT030_OpenNeuro_ds004993",
    "PT030_OpenNeuro_ds005075",
    "PT030_OpenNeuro_ds005138",
    "PT030_OpenNeuro_ds005205",
    "PT030_OpenNeuro_ds005576",
    "PT030_OpenNeuro_ds006188",
    "PT035_Yale_Brain_Mets_Longitudinal"
}

# Fallback: ds-IDs to check inside new_path when the dataset column
# is just "PT030_OpenNeuro" (without the ds-ID suffix)
SKULL_STRIPPED_OPENNEURO_IDS = {
    "ds000022", "ds000228", "ds000229", "ds001110", "ds001168", "ds001235",
    "ds001339", "ds001534", "ds001551", "ds001832", "ds001882", "ds001912",
    "ds002076", "ds002201", "ds00201",
    "ds002367", "ds002606", "ds002672", "ds002675", "ds002748", "ds002995",
    "ds003007", "ds003340", "ds003367", "ds003466", "ds003511", "ds003716",
    "ds003763", "ds003777", "ds003798", "ds003835", "ds003836", "ds003949",
    "ds003967", "ds003972", "ds003990", "ds004054", "ds004187", "ds004286",
    "ds004312", "ds004553", "ds004564", "ds004648", "ds004666", "ds004692",
    "ds004710", "ds004798", "ds004889", "ds004993", "ds005075", "ds005138",
    "ds005205", "ds005576", "ds006188",
}


# ── Filter functions ──────────────────────────────────────────────────────────

def is_skull_stripped(row: pd.Series) -> bool:
    ds = str(row["dataset"]).strip()
    if ds in SKULL_STRIPPED_EXACT:
        return True
    # Fallback for "PT030_OpenNeuro" without ds-ID in column name
    if "openneuro" in ds.lower() or ds.startswith("PT030"):
        path_str = str(row.get("new_path", "")).lower()
        for dsid in SKULL_STRIPPED_OPENNEURO_IDS:
            if dsid.lower() in path_str:
                return True
    return False


def is_dwi(new_path: str) -> bool:
    """Return True for DWI / diffusion scans that should be excluded."""
    p = new_path.lower()
    return (
        "/dwi/" in p
        or "\\dwi\\" in p
        or p.endswith("_dwi.nii.gz")
        or p.endswith("_dwi.nii")
        or "_dwi_" in p
        or "/dif/" in p
        or "_adc" in p
        or "_trace" in p
        or "_b0." in p
        or "_b1000" in p
    )


# ── File copy helper ──────────────────────────────────────────────────────────

def copy_file(src: Path, dst: Path, dry_run: bool) -> str:
    if not src.exists():
        return f"MISSING\t{src}"
    if dst.exists():
        return f"SKIP_EXISTS\t{dst}"
    if not dry_run:
        dst.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy2(src, dst)
    return f"{'DRY_RUN' if dry_run else 'COPIED'}\t{dst}"


# ── Main ──────────────────────────────────────────────────────────────────────

def main():
    parser = argparse.ArgumentParser(
        description="Copy skull-intact, 3D-only, non-DWI FOMO300K scans to processed dir"
    )
    parser.add_argument("--fomo_root",  required=True, help="Raw FOMO300K root directory")
    parser.add_argument("--mapping",    required=True, help="Path to mapping.tsv")
    parser.add_argument("--mri_info",   required=True, help="Path to mri_info.tsv")
    parser.add_argument("--out_root",   required=True, help="Output processed root directory")
    parser.add_argument("--dry_run",    action="store_true",
                        help="Print what would be done without copying any files")
    parser.add_argument("--strict_3d",  action="store_true",
                        help="Also exclude scans with no MRAcquisitionType entry in mri_info")
    parser.add_argument("--n_workers",  type=int, default=8,
                        help="Number of parallel copy threads (default: 8)")
    args = parser.parse_args()

    fomo_root = Path(args.fomo_root)
    out_root  = Path(args.out_root)

    print(f"FOMO root  : {fomo_root}")
    print(f"Output     : {out_root}")
    print(f"Dry run    : {args.dry_run}")
    print(f"Strict 3D  : {args.strict_3d}")
    print(f"Workers    : {args.n_workers}")
    print()

    # ── load mapping.tsv ──────────────────────────────────────────────────────
    print("Loading mapping.tsv …")
    df = pd.read_csv(args.mapping, sep="\t", dtype=str)
    df.columns = df.columns.str.strip()
    print(f"  Total rows: {len(df):,}  datasets: {df['dataset'].nunique()}")

    # ── load mri_info.tsv, build (dataset, filename) -> MRAcquisitionType ────
    print("Loading mri_info.tsv …")
    mi = pd.read_csv(args.mri_info, sep="\t", dtype=str)
    mi.columns = mi.columns.str.strip()
    acq_lookup = {}
    if "MRAcquisitionType" in mi.columns and "filename" in mi.columns:
        for _, r in mi.iterrows():
            key = (str(r["dataset"]).strip(), str(r["filename"]).strip())
            acq_lookup[key] = str(r.get("MRAcquisitionType", "")).strip().upper()
    print(f"  Total rows: {len(mi):,}  lookup entries: {len(acq_lookup):,}")
    print()

    # ── Filter 1: skull-stripped datasets ────────────────────────────────────
    mask_skull = df.apply(is_skull_stripped, axis=1)
    n_skull    = mask_skull.sum()
    df = df[~mask_skull].copy()
    print(f"[Filter 1] Skull-stripped removed : {n_skull:,}  → {len(df):,} remaining")

    # ── Filter 2: DWI scans ───────────────────────────────────────────────────
    mask_dwi = df["new_path"].apply(is_dwi)
    n_dwi    = mask_dwi.sum()
    df = df[~mask_dwi].copy()
    print(f"[Filter 2] DWI removed            : {n_dwi:,}  → {len(df):,} remaining")

    # ── Filter 3: 3D only ─────────────────────────────────────────────────────
    def get_acq(row):
        key = (str(row["dataset"]).strip(), str(row["new_path"]).strip())
        return acq_lookup.get(key, "UNKNOWN")

    df["_acq_type"] = df.apply(get_acq, axis=1)

    n_3d      = (df["_acq_type"] == "3D").sum()
    n_2d      = (df["_acq_type"] == "2D").sum()
    n_unknown = (df["_acq_type"] == "UNKNOWN").sum()
    print(f"[Filter 3] Acquisition breakdown  : 3D={n_3d:,}  2D={n_2d:,}  "
          f"unknown={n_unknown:,}")

    if args.strict_3d:
        mask_keep = df["_acq_type"] == "3D"
        print(f"           strict_3d=True → keeping 3D only, dropping unknown")
    else:
        mask_keep = df["_acq_type"].isin(["3D", "UNKNOWN"])
        print(f"           strict_3d=False → keeping 3D + unknown "
              f"(use --strict_3d to also drop unknown)")

    n_removed = (~mask_keep).sum()
    df = df[mask_keep].copy()
    print(f"           Non-3D removed         : {n_removed:,}  → {len(df):,} remaining")
    print()
    print(f"Final: {len(df):,} scans from {df['dataset'].nunique()} datasets")
    print()

    # ── Build copy job list ───────────────────────────────────────────────────
    jobs = []
    for _, row in df.iterrows():
        ds       = str(row["dataset"]).strip()
        new_path = str(row["new_path"]).strip()
        src = fomo_root / ds / new_path
        dst = out_root  / ds / new_path
        jobs.append((src, dst))

    if args.dry_run:
        print(f"DRY RUN — would copy {len(jobs):,} files. Re-run without --dry_run to copy.")
        print("\nDone.")
        return

    # ── Parallel copy ─────────────────────────────────────────────────────────
    out_root.mkdir(parents=True, exist_ok=True)
    log_path = out_root / "copy_log.tsv"
    results  = {"COPIED": 0, "SKIP_EXISTS": 0, "MISSING": 0, "ERROR": 0}

    with open(str(log_path), "w") as log_f:
        log_f.write("status\tpath\n")
        with ThreadPoolExecutor(max_workers=args.n_workers) as pool:
            futures = {
                pool.submit(copy_file, src, dst, False): (src, dst)
                for src, dst in jobs
            }
            done = 0
            for future in as_completed(futures):
                done += 1
                try:
                    result = future.result()
                    status = result.split("\t")[0]
                    results[status] = results.get(status, 0) + 1
                    log_f.write(result + "\n")
                except Exception as e:
                    results["ERROR"] += 1
                    src, dst = futures[future]
                    log_f.write(f"ERROR\t{dst}\t{e}\n")

                if done % 5000 == 0 or done == len(jobs):
                    print(f"  Progress: {done:,}/{len(jobs):,}  "
                          f"copied={results['COPIED']:,}  "
                          f"skipped={results['SKIP_EXISTS']:,}  "
                          f"missing={results['MISSING']:,}  "
                          f"errors={results['ERROR']:,}")

    # ── Write filtered metadata ───────────────────────────────────────────────
    print("\nWriting metadata files …")

    df.drop(columns=["_acq_type"], errors="ignore").to_csv(
        str(out_root / "mapping.tsv"), sep="\t", index=False
    )
    print(f"  Wrote mapping.tsv ({len(df):,} rows)")

    keep_keys   = set(zip(df["dataset"], df["new_path"]))
    mi_filtered = mi[
        mi.apply(
            lambda r: (str(r["dataset"]).strip(), str(r["filename"]).strip()) in keep_keys,
            axis=1
        )
    ]
    mi_filtered.to_csv(str(out_root / "mri_info.tsv"), sep="\t", index=False)
    print(f"  Wrote mri_info.tsv ({len(mi_filtered):,} rows)")

    src_p = fomo_root / "participants.tsv"
    if src_p.exists():
        shutil.copy2(src_p, out_root / "participants.tsv")
        print(f"  Copied participants.tsv")

    # ── Summary ───────────────────────────────────────────────────────────────
    print("\n── Summary ──────────────────────────────────────────────────────────")
    for k, v in results.items():
        print(f"  {k:<15} {v:,}")
    print(f"\n  Log → {log_path}")
    print("\nDone.")


if __name__ == "__main__":
    main()

PT003_CUNMET contains perfusion data, which can be deleted with:

find FOMO300k/processed/PT003_CUNMET/ \
  -type d -name "perf" -exec rm -rf {} +
