@phdthesis{Vidal2008PhD,
  author = {F. P. Vidal},
  title = {Simulation of image guided needle puncture: contribution to real-time ultrasound and fluoroscopic rendering, and volume haptic rendering},
  year = 2008,
  month = jan,
  school = {School of Computer Science, Bangor University, UK},
  abstract = {The potential for the use of computer graphics in medicine has been well established. An important emerging area is the provision of training tools for interventional radiology (IR) procedures. These are minimally invasive, targeted treatments performed using imaging for guidance. Training of the skills required in IR remains an apprenticeship, consisting of close supervision under the model: i) see one, ii) do one, iii) teach one. Simulations of guidewire and catheter insertion for IR are already commercially available. However, training of needle guidance using ultrasound (US), fluoroscopic or computed tomography (CT) images - the first step in approximately half of all IR procedures - has been largely overlooked. We have therefore developed a simulator, called BIGNePSi, to provide training for this commonly performed procedure.

This thesis is devoted to the development of novel techniques to provide an integrated visual-haptic system for the simulation of US guided needle puncture, using patient specific data with 3D textures and volume haptics. The result is a cost-effective training tool, built from off-the-shelf components (visual displays, haptic devices and workstations), that delivers a high-fidelity training experience.

We demonstrate that the proxy-based haptic rendering method can be extended to use volumetric data, so that the trainee can feel underlying structures, such as ribs and bones, whilst scanning the surface of the body with a virtual US transducer. A volume haptic model is also proposed that implements an effective model of needle puncture, which can be modulated using actual force measurements. A method of approximating US-like images from CT data sets is also described. We demonstrate how to exploit today's graphics cards to achieve physically-based simulation of x-ray images using GPU programming and 3D texture hardware. We also show how GPU programming can be used to modify, at interactive frame rates, the content of 3D textures, both to include the needle shaft and to artificially add a tissue lesion to the dataset of a specific patient. This enables the clinician to provide students with a wide variety of training scenarios.

Validation of the simulator is critical to its eventual uptake in a training curriculum, and a project such as this cannot be undertaken without close co-operation with domain experts. Hence this project has been undertaken within a multi-disciplinary collaboration involving practising interventional radiologists and computer scientists of the Collaborators in Radiological Interventional Virtual Environments (CRaIVE) consortium. The cognitive task analysis (CTA) for freehand US guided biopsy performed by our psychologist partners has been used extensively to guide the design of the simulator. In addition, to ensure that the fidelity of the simulator is at an acceptable level, clinical validation of the system's content has been carried out at each development stage. In further objective evaluations, questionnaires were developed to evaluate the features and performance of the simulator; these were distributed to trainees and experts at different workshops. Many suggestions for improvement were collected and subsequently integrated into the simulator.},
  pdf = {pdf/Vidal2008PhD.pdf}
}
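The physically-based x-ray rendering mentioned in the abstract above rests on the Beer-Lambert attenuation law, I = I0 * exp(-sum(mu_i * dl)). The C++ sketch below shows the per-ray computation that a GPU implementation would evaluate for each pixel, with the attenuation samples fetched from a 3D texture; the function name, sampling scheme and units are illustrative assumptions, not the thesis code.

    // Minimal sketch of Beer-Lambert attenuation along one ray through a
    // CT volume: I = I0 * exp(-sum(mu_i * dl)).  Names are illustrative.
    #include <cmath>
    #include <vector>

    // mu holds linear attenuation coefficients (cm^-1) sampled at equally
    // spaced positions along the ray (on GPU: fetched from a 3D texture).
    double attenuate(double incidentIntensity,      // I0
                     const std::vector<double>& mu, // samples along the ray
                     double stepLength)             // dl, in cm
    {
        double lineIntegral = 0.0;
        for (double m : mu)
            lineIntegral += m * stepLength;         // sum(mu_i * dl)
        return incidentIntensity * std::exp(-lineIntegral);
    }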
@mastersthesis{Vidal2003IS,
  author = {F. P. Vidal},
  title = {Modelling the response of X-ray detectors and removing artefacts in {3D} tomography},
  year = 2003,
  month = sep,
  school = {\'Ecole doctorale \'Electronique, \'Electrotechnique, Automatique, INSA de Lyon, France},
  abstract = {This work presents a method for modelling the response of X-ray detectors, applied to the removal of artefacts in tomography.
	In some volumes reconstructed by tomography using synchrotron radiation at the ESRF, dark line artefacts (an under-estimation of the linear attenuation coefficients) appear when high-density materials are aligned.
	The causes of these artefacts were determined using experimental and simulation methods; the simulated artefacts were then removed.
	Two distinct causes are highlighted in this study. One of them is the impulse response of the detector used, the Frelon camera of the ID19 beamline. An iterative fixed-point algorithm was used successfully to remove simulated artefacts from tomographic slices.},
  keywords = {tomography; X-ray; detectors response modelling; artefacts; simulation; deconvolution},
  pdf = {pdf/Vidal2003IS.pdf}
}
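The iterative fixed-point algorithm referred to in the abstract above can be illustrated with a Van Cittert-style deconvolution, x_{k+1} = x_k + (y - h * x_k), where y is the measured signal and h the detector impulse response. The following C++ sketch is a 1D illustration under those assumptions, not the thesis implementation.

    // Illustrative fixed-point deconvolution (Van Cittert iteration):
    //   x_{k+1} = x_k + (y - conv(h, x_k))
    #include <cstddef>
    #include <vector>

    // 'Same'-size discrete convolution, kernel centred on each sample.
    static std::vector<double> convolve(const std::vector<double>& x,
                                        const std::vector<double>& h)
    {
        std::vector<double> out(x.size(), 0.0);
        const std::ptrdiff_t half = static_cast<std::ptrdiff_t>(h.size()) / 2;
        for (std::size_t i = 0; i < x.size(); ++i)
            for (std::size_t j = 0; j < h.size(); ++j) {
                const std::ptrdiff_t k = static_cast<std::ptrdiff_t>(i)
                                       - static_cast<std::ptrdiff_t>(j) + half;
                if (k >= 0 && k < static_cast<std::ptrdiff_t>(x.size()))
                    out[i] += h[j] * x[static_cast<std::size_t>(k)];
            }
        return out;
    }

    std::vector<double> deconvolve(const std::vector<double>& y,
                                   const std::vector<double>& h,
                                   int iterations)
    {
        std::vector<double> x = y;               // initial estimate x_0 = y
        for (int it = 0; it < iterations; ++it) {
            const std::vector<double> hx = convolve(x, h);
            for (std::size_t i = 0; i < y.size(); ++i)
                x[i] += y[i] - hx[i];            // fixed-point update
        }
        return x;
    }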
@mastersthesis{Vidal2002CAGTA,
  author = {F. P. Vidal},
  title = {Constructing a {GUI} using {3D} reconstruction for a radiographer's training tool},
  year = 2002,
  month = sep,
  school = {School of Computing and Mathematics, University of Teesside, UK},
  abstract = {This project presents the GUI for a radiographer's training tool. It is one of the two parts of a project idea proposed by Senior Lecturer Philip Cosson, from the School of Health and Social Care at the University of Teesside. His wish was to develop a program that would allow students to practise taking X-ray images without exposing patients to X-rays. The work was divided into two projects: the GUI and the X-ray rendering. The GUI system itself has two parts: 3D reconstruction and the setting of radiography parameters via the GUI. Volumetric data, obtained from MR or CT scanners, is stored, slice by slice, in DICOM files, the medical imaging file standard. The Papyrus toolkit, developed at the University Hospital of Geneva, is used to read the DICOMDIR and DICOM files that contain the medical images. Before 3D reconstruction, information is extracted and a segmentation algorithm detects bone and skin in the different images of the dataset. From these data, and using the marching cubes algorithm, a 3D model is created. The GUI lets users select different datasets. Users can set the position and orientation of the 3D reconstructed objects. They can choose the corresponding cassette and move the X-ray source. The GUI communicates all settings of the scene, via an agreed protocol, to the other part of the project, the X-ray renderer. The GUI is written in C++ using the Win32 API and OpenGL.},
  pdf = {pdf/Vidal2002CAGTA.pdf}
}
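The bone/skin segmentation step described in the abstract above can be illustrated by simple Hounsfield-unit thresholding of each CT slice before marching cubes extracts the isosurfaces. The C++ sketch below uses invented threshold values and names; it illustrates the idea only, not the project's algorithm.

    // Illustrative threshold segmentation of one CT slice into bone/skin
    // labels prior to isosurface extraction (e.g. marching cubes).
    // Threshold values and names are assumptions for illustration only.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum class Label : std::uint8_t { Background, Skin, Bone };

    std::vector<Label> segmentSlice(const std::vector<std::int16_t>& hu)
    {
        std::vector<Label> mask(hu.size(), Label::Background);
        for (std::size_t i = 0; i < hu.size(); ++i) {
            if (hu[i] >= 300)       mask[i] = Label::Bone; // dense bone
            else if (hu[i] >= -200) mask[i] = Label::Skin; // soft tissue
        }
        return mask;
    }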
