+/* Copy a guestfs_lvm_pv C struct into a freshly allocated OCaml
+ * record (a 14-field block with tag 0).  Fields are stored in the
+ * order of the OCaml type definition: C strings via caml_copy_string,
+ * 64-bit counters via caml_copy_int64 (boxed Int64.t).
+ *
+ * CAMLparam0/CAMLlocal2 register rv and v as GC roots so both remain
+ * valid across the allocations performed between the stores. */
+static CAMLprim value
+copy_lvm_pv (const struct guestfs_lvm_pv *pv)
+{
+ CAMLparam0 ();
+ CAMLlocal2 (rv, v);
+
+ rv = caml_alloc (14, 0);
+ v = caml_copy_string (pv->pv_name);
+ Store_field (rv, 0, v);
+ /* pv_uuid is a fixed 32-byte binary buffer with no NUL terminator,
+  * so it is copied with memcpy into a fresh 32-byte OCaml string
+  * rather than with caml_copy_string. */
+ v = caml_alloc_string (32);
+ memcpy (String_val (v), pv->pv_uuid, 32);
+ Store_field (rv, 1, v);
+ v = caml_copy_string (pv->pv_fmt);
+ Store_field (rv, 2, v);
+ v = caml_copy_int64 (pv->pv_size);
+ Store_field (rv, 3, v);
+ v = caml_copy_int64 (pv->dev_size);
+ Store_field (rv, 4, v);
+ v = caml_copy_int64 (pv->pv_free);
+ Store_field (rv, 5, v);
+ v = caml_copy_int64 (pv->pv_used);
+ Store_field (rv, 6, v);
+ v = caml_copy_string (pv->pv_attr);
+ Store_field (rv, 7, v);
+ v = caml_copy_int64 (pv->pv_pe_count);
+ Store_field (rv, 8, v);
+ v = caml_copy_int64 (pv->pv_pe_alloc_count);
+ Store_field (rv, 9, v);
+ v = caml_copy_string (pv->pv_tags);
+ Store_field (rv, 10, v);
+ v = caml_copy_int64 (pv->pe_start);
+ Store_field (rv, 11, v);
+ v = caml_copy_int64 (pv->pv_mda_count);
+ Store_field (rv, 12, v);
+ v = caml_copy_int64 (pv->pv_mda_free);
+ Store_field (rv, 13, v);
+ CAMLreturn (rv);
+}
+
+/* Convert a guestfs_lvm_pv_list into an OCaml array of pv records.
+ *
+ * An empty list must be returned as the preallocated zero-sized block
+ * Atom (0), since caml_alloc cannot allocate a zero-length block.
+ * rv is a registered GC root, so each element converted by
+ * copy_lvm_pv (which allocates) is stored with Store_field, which
+ * performs the required caml_modify write barrier. */
+static CAMLprim value
+copy_lvm_pv_list (const struct guestfs_lvm_pv_list *pvs)
+{
+ CAMLparam0 ();
+ CAMLlocal2 (rv, v);
+ unsigned int i;  /* unsigned: pvs->len is unsigned, avoid sign-compare */
+
+ if (pvs->len == 0)
+ CAMLreturn (Atom (0));
+ else {
+ rv = caml_alloc (pvs->len, 0);
+ for (i = 0; i < pvs->len; ++i) {
+ v = copy_lvm_pv (&pvs->val[i]);
+ /* Store_field == caml_modify (&Field (...)); use the idiomatic
+  * macro for consistency with the copy_lvm_pv converter. */
+ Store_field (rv, i, v);
+ }
+ CAMLreturn (rv);
+ }
+}
+
+/* Copy a guestfs_lvm_vg C struct into a freshly allocated OCaml
+ * record (a 19-field block with tag 0).  Fields are stored in the
+ * order of the OCaml type definition: C strings via caml_copy_string,
+ * 64-bit counters via caml_copy_int64 (boxed Int64.t).
+ *
+ * CAMLparam0/CAMLlocal2 register rv and v as GC roots so both remain
+ * valid across the allocations performed between the stores. */
+static CAMLprim value
+copy_lvm_vg (const struct guestfs_lvm_vg *vg)
+{
+ CAMLparam0 ();
+ CAMLlocal2 (rv, v);
+
+ rv = caml_alloc (19, 0);
+ v = caml_copy_string (vg->vg_name);
+ Store_field (rv, 0, v);
+ /* vg_uuid is a fixed 32-byte binary buffer with no NUL terminator,
+  * so it is copied with memcpy into a fresh 32-byte OCaml string
+  * rather than with caml_copy_string. */
+ v = caml_alloc_string (32);
+ memcpy (String_val (v), vg->vg_uuid, 32);
+ Store_field (rv, 1, v);
+ v = caml_copy_string (vg->vg_fmt);
+ Store_field (rv, 2, v);
+ v = caml_copy_string (vg->vg_attr);
+ Store_field (rv, 3, v);
+ v = caml_copy_int64 (vg->vg_size);
+ Store_field (rv, 4, v);
+ v = caml_copy_int64 (vg->vg_free);
+ Store_field (rv, 5, v);
+ v = caml_copy_string (vg->vg_sysid);
+ Store_field (rv, 6, v);
+ v = caml_copy_int64 (vg->vg_extent_size);
+ Store_field (rv, 7, v);
+ v = caml_copy_int64 (vg->vg_extent_count);
+ Store_field (rv, 8, v);
+ v = caml_copy_int64 (vg->vg_free_count);
+ Store_field (rv, 9, v);
+ v = caml_copy_int64 (vg->max_lv);
+ Store_field (rv, 10, v);
+ v = caml_copy_int64 (vg->max_pv);
+ Store_field (rv, 11, v);
+ v = caml_copy_int64 (vg->pv_count);
+ Store_field (rv, 12, v);
+ v = caml_copy_int64 (vg->lv_count);
+ Store_field (rv, 13, v);
+ v = caml_copy_int64 (vg->snap_count);
+ Store_field (rv, 14, v);
+ v = caml_copy_int64 (vg->vg_seqno);
+ Store_field (rv, 15, v);
+ v = caml_copy_string (vg->vg_tags);
+ Store_field (rv, 16, v);
+ v = caml_copy_int64 (vg->vg_mda_count);
+ Store_field (rv, 17, v);
+ v = caml_copy_int64 (vg->vg_mda_free);
+ Store_field (rv, 18, v);
+ CAMLreturn (rv);
+}
+
+/* Convert a guestfs_lvm_vg_list into an OCaml array of vg records.
+ *
+ * An empty list must be returned as the preallocated zero-sized block
+ * Atom (0), since caml_alloc cannot allocate a zero-length block.
+ * rv is a registered GC root, so each element converted by
+ * copy_lvm_vg (which allocates) is stored with Store_field, which
+ * performs the required caml_modify write barrier. */
+static CAMLprim value
+copy_lvm_vg_list (const struct guestfs_lvm_vg_list *vgs)
+{
+ CAMLparam0 ();
+ CAMLlocal2 (rv, v);
+ unsigned int i;  /* unsigned: vgs->len is unsigned, avoid sign-compare */
+
+ if (vgs->len == 0)
+ CAMLreturn (Atom (0));
+ else {
+ rv = caml_alloc (vgs->len, 0);
+ for (i = 0; i < vgs->len; ++i) {
+ v = copy_lvm_vg (&vgs->val[i]);
+ /* Store_field == caml_modify (&Field (...)); use the idiomatic
+  * macro for consistency with the copy_lvm_vg converter. */
+ Store_field (rv, i, v);
+ }
+ CAMLreturn (rv);
+ }
+}
+
+/* Copy a guestfs_lvm_lv C struct into a freshly allocated OCaml
+ * record (a 16-field block with tag 0).  Fields are stored in the
+ * order of the OCaml type definition: C strings via caml_copy_string,
+ * 64-bit counters via caml_copy_int64 (boxed Int64.t), and the two
+ * percentage fields as 'float option'.
+ *
+ * CAMLparam0/CAMLlocal3 register rv, v and v2 as GC roots so they
+ * remain valid across the allocations performed between the stores. */
+static CAMLprim value
+copy_lvm_lv (const struct guestfs_lvm_lv *lv)
+{
+ CAMLparam0 ();
+ CAMLlocal3 (rv, v, v2);
+
+ rv = caml_alloc (16, 0);
+ v = caml_copy_string (lv->lv_name);
+ Store_field (rv, 0, v);
+ /* lv_uuid is a fixed 32-byte binary buffer with no NUL terminator,
+  * so it is copied with memcpy into a fresh 32-byte OCaml string
+  * rather than with caml_copy_string. */
+ v = caml_alloc_string (32);
+ memcpy (String_val (v), lv->lv_uuid, 32);
+ Store_field (rv, 1, v);
+ v = caml_copy_string (lv->lv_attr);
+ Store_field (rv, 2, v);
+ v = caml_copy_int64 (lv->lv_major);
+ Store_field (rv, 3, v);
+ v = caml_copy_int64 (lv->lv_minor);
+ Store_field (rv, 4, v);
+ v = caml_copy_int64 (lv->lv_kernel_major);
+ Store_field (rv, 5, v);
+ v = caml_copy_int64 (lv->lv_kernel_minor);
+ Store_field (rv, 6, v);
+ v = caml_copy_int64 (lv->lv_size);
+ Store_field (rv, 7, v);
+ v = caml_copy_int64 (lv->seg_count);
+ Store_field (rv, 8, v);
+ v = caml_copy_string (lv->origin);
+ Store_field (rv, 9, v);
+ /* snap_percent uses a negative value as the "not available"
+  * sentinel: build Some f (a 1-field block holding a boxed float)
+  * when >= 0, otherwise the immediate None (Val_int (0)). */
+ if (lv->snap_percent >= 0) { /* Some snap_percent */
+ v2 = caml_copy_double (lv->snap_percent);
+ v = caml_alloc (1, 0);
+ Store_field (v, 0, v2);
+ } else /* None */
+ v = Val_int (0);
+ Store_field (rv, 10, v);
+ /* copy_percent: same negative-sentinel -> float option encoding. */
+ if (lv->copy_percent >= 0) { /* Some copy_percent */
+ v2 = caml_copy_double (lv->copy_percent);
+ v = caml_alloc (1, 0);
+ Store_field (v, 0, v2);
+ } else /* None */
+ v = Val_int (0);
+ Store_field (rv, 11, v);
+ v = caml_copy_string (lv->move_pv);
+ Store_field (rv, 12, v);
+ v = caml_copy_string (lv->lv_tags);
+ Store_field (rv, 13, v);
+ v = caml_copy_string (lv->mirror_log);
+ Store_field (rv, 14, v);
+ v = caml_copy_string (lv->modules);
+ Store_field (rv, 15, v);
+ CAMLreturn (rv);
+}
+
+/* Convert a guestfs_lvm_lv_list into an OCaml array of lv records.
+ *
+ * An empty list must be returned as the preallocated zero-sized block
+ * Atom (0), since caml_alloc cannot allocate a zero-length block.
+ * rv is a registered GC root, so each element converted by
+ * copy_lvm_lv (which allocates) is stored with Store_field, which
+ * performs the required caml_modify write barrier. */
+static CAMLprim value
+copy_lvm_lv_list (const struct guestfs_lvm_lv_list *lvs)
+{
+ CAMLparam0 ();
+ CAMLlocal2 (rv, v);
+ unsigned int i;  /* unsigned: lvs->len is unsigned, avoid sign-compare */
+
+ if (lvs->len == 0)
+ CAMLreturn (Atom (0));
+ else {
+ rv = caml_alloc (lvs->len, 0);
+ for (i = 0; i < lvs->len; ++i) {
+ v = copy_lvm_lv (&lvs->val[i]);
+ /* Store_field == caml_modify (&Field (...)); use the idiomatic
+  * macro for consistency with the copy_lvm_lv converter. */
+ Store_field (rv, i, v);
+ }
+ CAMLreturn (rv);
+ }
+}
+